diff --git a/.genignore b/.genignore index be3ba87c..6bd11b26 100644 --- a/.genignore +++ b/.genignore @@ -1,4 +1,6 @@ pyproject.toml examples/* -src/mistral/extra/* -pylintrc \ No newline at end of file +/utils/* +src/mistralai/extra/* +pylintrc +scripts/prepare_readme.py diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index 0bbf7126..9dcb04e4 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -14,20 +14,19 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4 with: python-version: '3.12' - - name: Install Poetry - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Install dependencies run: | - touch README-PYPI.md - poetry install --all-extras + uv sync --all-extras # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion - name: Run all linters diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index 7d8eb792..cecefb0e 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -14,19 +14,19 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.10', '3.11', '3.12', '3.13'] steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 with: python-version: ${{ matrix.python-version }} - - name: Install Poetry - uses: 
snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Set VERSION run: | @@ -39,24 +39,12 @@ jobs: - name: Build the package run: | - touch README-PYPI.md # Create this file since the client is not built by Speakeasy - poetry build + uv build - - name: For python 3.9, install the client and run examples without extra dependencies. - if: matrix.python-version == '3.9' - run: | - PACKAGE="dist/$(ls dist | grep whl | head -n 1)" - python3 -m pip install "$PACKAGE" - ./scripts/run_examples.sh --no-extra-dep - env: - MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} - MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} - - - name: For python 3.10+, install client with extras and run all examples. - if: matrix.python-version != '3.9' + - name: Install client with extras and run all examples. run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" - python3 -m pip install "$PACKAGE" + uv pip install --system "$PACKAGE" ./scripts/run_examples.sh env: MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 7ec5bb8d..b5d0741b 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -16,14 +16,72 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 with: force: ${{ github.event.inputs.force }} mode: pr set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest - target: mistral-python-sdk-azure + target: mistralai-azure-sdk secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} 
speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Find PR branch + id: find-pr + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} + + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + CURRENT=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/azure + + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/azure/pyproject.toml packages/azure/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align Azure pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git 
a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index c4da64f7..05f88e25 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -16,14 +16,72 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 with: force: ${{ github.event.inputs.force }} mode: pr set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest - target: mistral-python-sdk-google-cloud + target: mistralai-gcp-sdk secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Find PR branch + id: find-pr + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} + + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + CURRENT=$(grep 
'^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/gcp + + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/gcp/pyproject.toml packages/gcp/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align GCP pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 1d80cddc..59fe1150 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 with: force: ${{ github.event.inputs.force }} mode: pr @@ -24,6 +24,64 @@ jobs: speakeasy_version: latest target: mistralai-sdk secrets: - github_access_token: ${{ secrets.SPEAKEASY_WORKFLOW_GITHUB_PAT }} + github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Find PR branch + id: find-pr + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | 
.[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} + + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + CURRENT=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" + + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add pyproject.toml uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index 46af0ad3..44635571 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -5,15 +5,25 @@ permissions: pull-requests: write statuses: write "on": + workflow_dispatch: + inputs: + confirm_publish: + description: 'WARNING: This will publish v2 SDK (mistralai.client namespace) which is still WIP/alpha. To publish v1 (mistralai namespace), use the v1 branch instead. 
Type "publish" to confirm.' + required: false + type: string push: branches: - - main + - v1 paths: - RELEASES.md - "*/RELEASES.md" jobs: publish: - uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@v15 + # Auto-publish from v1 branch; require manual confirmation from main + if: | + github.ref == 'refs/heads/v1' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.confirm_publish == 'publish') + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml index 230066cb..9a53c1e5 100644 --- a/.github/workflows/test_custom_code.yaml +++ b/.github/workflows/test_custom_code.yaml @@ -14,35 +14,23 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Set up Python id: setup-python - uses: actions/setup-python@v4 + uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4 with: python-version: '3.12' - - name: Install Poetry - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - with: - virtualenvs-create: true - virtualenvs-in-project: true - virtualenvs-path: .venv - installer-parallel: true - - - name: Load cached venv - id: cached-poetry-dependencies - uses: actions/cache@v4 - with: - path: .venv - key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Install dependencies - # Install dependencies if cache does not exist - if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - run: poetry install --no-interaction --no-root + run: | + uv sync --all-extras - name: Run the 
'src/mistralai/extra' package unit tests - run: | - source .venv/bin/activate - python3.12 -m unittest discover -s src/mistralai/extra/tests -t src + run: uv run python3.12 -m unittest discover -s src/mistralai/extra/tests -t src + + - name: Run pytest for repository tests + run: uv run pytest tests/ diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml new file mode 100644 index 00000000..78b5317b --- /dev/null +++ b/.github/workflows/update_speakeasy.yaml @@ -0,0 +1,110 @@ +name: Update Speakeasy SDKs +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +on: + workflow_dispatch: + inputs: + version: + description: 'Speakeasy version to update to (e.g., 1.580.2)' + required: true + type: string + targets: + description: 'Targets to update.' + required: true + type: choice + options: + - mistralai-sdk + - mistralai-azure-sdk + - mistralai-gcp-sdk + - all + +jobs: + update-sdks: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: '3.11' + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Install dependencies + run: | + uv sync --group dev --group lint --no-default-groups + + - name: Install Speakeasy CLI + run: | + curl -fsSL https://go.speakeasy.com/cli-install.sh | sh + speakeasy --version + + - name: Configure Git + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git config --local --type bool push.autoSetupRemote true + + - name: Create branch + run: | + TIMESTAMP=$(date +%Y%m%d-%H%M%S) + echo "TIMESTAMP=$TIMESTAMP" >> $GITHUB_ENV + git checkout -b update-speakeasy-to-${{ github.event.inputs.version }}-$TIMESTAMP + + 
- name: Update Speakeasy SDKs + run: | + # Split targets and build command with multiple --targets flags + TARGETS_ARGS="" + for target in ${{ github.event.inputs.targets }}; do + TARGETS_ARGS="$TARGETS_ARGS --targets $target" + done + + uv run inv update-speakeasy \ + --version "${{ github.event.inputs.version }}" \ + $TARGETS_ARGS + env: + SPEAKEASY_API_KEY: ${{ secrets.SPEAKEASY_API_KEY }} + + - name: Check for changes + id: check-changes + run: | + if [ -n "$(git status --porcelain)" ]; then + echo "has_changes=true" >> $GITHUB_OUTPUT + echo "Files changed:" + git status --porcelain + else + echo "has_changes=false" >> $GITHUB_OUTPUT + echo "No changes detected" + fi + + - name: Commit and push changes + if: steps.check-changes.outputs.has_changes == 'true' + run: | + git add . + git commit -m "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" + git push origin update-speakeasy-to-${{ github.event.inputs.version }}-${{ env.TIMESTAMP }} + + - name: Create Pull Request + if: steps.check-changes.outputs.has_changes == 'true' + run: | + gh pr create \ + --base main \ + --head update-speakeasy-to-${{ github.event.inputs.version }}-${{ env.TIMESTAMP }} \ + --title "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" \ + --body "This PR updates the Speakeasy SDKs to version ${{ github.event.inputs.version }}. It was automatically generated by the [Update Speakeasy workflow](.github/workflows/update_speakeasy.yaml)." \ + --label automated \ + --label speakeasy-update \ + --assignee ${{ github.actor }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Comment on workflow run + if: steps.check-changes.outputs.has_changes == 'false' + run: | + echo "No changes were detected. The SDKs are already up to date with version ${{ github.event.inputs.version }}." 
diff --git a/.gitignore b/.gitignore index 954adb7c..cf2de5ee 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.env.local +.idea **/__pycache__/ **/.speakeasy/temp/ **/.speakeasy/logs/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39e850eb..9be71784 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,17 +4,17 @@ repos: hooks: - id: ruff args: [--fix] - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ - repo: https://github.com/RobertCraigie/pyright-python rev: v1.1.401 hooks: - id: pyright - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 73686adb..7314b7b1 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,1246 +1,4027 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: c33c788946fa446bfcf90b60f68abde9 + docChecksum: b66b034aac7aa9b38c4fb47a3b3d843e docVersion: 1.0.0 - speakeasyVersion: 1.568.2 - generationVersion: 2.634.2 - releaseVersion: 1.9.3 - configChecksum: 0f65a9bdd8df5ae03eaaaea3ab055bf1 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0b1 + configChecksum: 871b5a7d3687bd2a9ebd0e205e4b36a3 repoURL: https://github.com/mistralai/client-python.git installationURL: 
https://github.com/mistralai/client-python.git published: true +persistentEdits: + generation_id: 1527268d-25cf-4a8c-8a67-09694eaf0d79 + pristine_commit_hash: 5642b69da5a9a00af1e84ca689b7587f2269d0c4 + pristine_tree_hash: f9eae7c82e85b3114e342a4b6500b9704a266493 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 - constsAndDefaults: 1.0.5 - core: 5.19.3 + configurableModuleName: 0.2.0 + constsAndDefaults: 1.0.7 + core: 6.0.12 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.2 flatRequests: 1.0.1 flattening: 3.1.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.1 + globalServerURLs: 3.2.0 + includes: 3.0.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.1 - responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.1.0 - serverEvents: 1.0.7 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.4 + unions: 3.1.4 uploadStreams: 1.0.0 -generatedFiles: - - .gitattributes - - .python-version - - .vscode/settings.json - - USAGE.md - - docs/models/agent.md - - docs/models/agentconversation.md - - docs/models/agentconversationobject.md - - docs/models/agentcreationrequest.md - - docs/models/agentcreationrequesttools.md - - docs/models/agenthandoffdoneevent.md - - docs/models/agenthandoffdoneeventtype.md - - docs/models/agenthandoffentry.md - - docs/models/agenthandoffentryobject.md - - docs/models/agenthandoffentrytype.md - - docs/models/agenthandoffstartedevent.md - - docs/models/agenthandoffstartedeventtype.md - - docs/models/agentobject.md - - docs/models/agentsapiv1agentsgetrequest.md - - 
docs/models/agentsapiv1agentslistrequest.md - - docs/models/agentsapiv1agentsupdaterequest.md - - docs/models/agentsapiv1agentsupdateversionrequest.md - - docs/models/agentsapiv1conversationsappendrequest.md - - docs/models/agentsapiv1conversationsappendstreamrequest.md - - docs/models/agentsapiv1conversationsgetrequest.md - - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md - - docs/models/agentsapiv1conversationshistoryrequest.md - - docs/models/agentsapiv1conversationslistrequest.md - - docs/models/agentsapiv1conversationsmessagesrequest.md - - docs/models/agentsapiv1conversationsrestartrequest.md - - docs/models/agentsapiv1conversationsrestartstreamrequest.md - - docs/models/agentscompletionrequest.md - - docs/models/agentscompletionrequestmessages.md - - docs/models/agentscompletionrequeststop.md - - docs/models/agentscompletionrequesttoolchoice.md - - docs/models/agentscompletionstreamrequest.md - - docs/models/agentscompletionstreamrequestmessages.md - - docs/models/agentscompletionstreamrequeststop.md - - docs/models/agentscompletionstreamrequesttoolchoice.md - - docs/models/agenttools.md - - docs/models/agentupdaterequest.md - - docs/models/agentupdaterequesttools.md - - docs/models/apiendpoint.md - - docs/models/archiveftmodelout.md - - docs/models/archiveftmodeloutobject.md - - docs/models/arguments.md - - docs/models/assistantmessage.md - - docs/models/assistantmessagecontent.md - - docs/models/assistantmessagerole.md - - docs/models/audiochunk.md - - docs/models/audiochunktype.md - - docs/models/audiotranscriptionrequest.md - - docs/models/audiotranscriptionrequeststream.md - - docs/models/basemodelcard.md - - docs/models/basemodelcardtype.md - - docs/models/batcherror.md - - docs/models/batchjobin.md - - docs/models/batchjobout.md - - docs/models/batchjoboutobject.md - - docs/models/batchjobsout.md - - docs/models/batchjobsoutobject.md - - docs/models/batchjobstatus.md - - docs/models/builtinconnectors.md - - 
docs/models/chatclassificationrequest.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionrequest.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/chatcompletionstreamrequestmessages.md - - docs/models/chatcompletionstreamrequeststop.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/chatmoderationrequest.md - - docs/models/chatmoderationrequestinputs.md - - docs/models/checkpointout.md - - docs/models/classificationrequest.md - - docs/models/classificationrequestinputs.md - - docs/models/classificationresponse.md - - docs/models/classificationtargetresult.md - - docs/models/classifierdetailedjobout.md - - docs/models/classifierdetailedjoboutintegrations.md - - docs/models/classifierdetailedjoboutjobtype.md - - docs/models/classifierdetailedjoboutobject.md - - docs/models/classifierdetailedjoboutstatus.md - - docs/models/classifierftmodelout.md - - docs/models/classifierftmodeloutmodeltype.md - - docs/models/classifierftmodeloutobject.md - - docs/models/classifierjobout.md - - docs/models/classifierjoboutintegrations.md - - docs/models/classifierjoboutjobtype.md - - docs/models/classifierjoboutobject.md - - docs/models/classifierjoboutstatus.md - - docs/models/classifiertargetin.md - - docs/models/classifiertargetout.md - - docs/models/classifiertrainingparameters.md - - docs/models/classifiertrainingparametersin.md - - docs/models/codeinterpretertool.md - - docs/models/codeinterpretertooltype.md - - docs/models/completionargs.md - - docs/models/completionargsstop.md - - docs/models/completionchunk.md - - docs/models/completiondetailedjobout.md - - docs/models/completiondetailedjoboutintegrations.md - - docs/models/completiondetailedjoboutjobtype.md - - docs/models/completiondetailedjoboutobject.md - - docs/models/completiondetailedjoboutrepositories.md - - docs/models/completiondetailedjoboutstatus.md - - 
docs/models/completionevent.md - - docs/models/completionftmodelout.md - - docs/models/completionftmodeloutobject.md - - docs/models/completionjobout.md - - docs/models/completionjoboutobject.md - - docs/models/completionresponsestreamchoice.md - - docs/models/completionresponsestreamchoicefinishreason.md - - docs/models/completiontrainingparameters.md - - docs/models/completiontrainingparametersin.md - - docs/models/content.md - - docs/models/contentchunk.md - - docs/models/conversationappendrequest.md - - docs/models/conversationappendrequesthandoffexecution.md - - docs/models/conversationappendstreamrequest.md - - docs/models/conversationappendstreamrequesthandoffexecution.md - - docs/models/conversationevents.md - - docs/models/conversationeventsdata.md - - docs/models/conversationhistory.md - - docs/models/conversationhistoryobject.md - - docs/models/conversationinputs.md - - docs/models/conversationmessages.md - - docs/models/conversationmessagesobject.md - - docs/models/conversationrequest.md - - docs/models/conversationresponse.md - - docs/models/conversationresponseobject.md - - docs/models/conversationrestartrequest.md - - docs/models/conversationrestartrequesthandoffexecution.md - - docs/models/conversationrestartstreamrequest.md - - docs/models/conversationrestartstreamrequesthandoffexecution.md - - docs/models/conversationstreamrequest.md - - docs/models/conversationstreamrequesthandoffexecution.md - - docs/models/conversationstreamrequesttools.md - - docs/models/conversationusageinfo.md - - docs/models/data.md - - docs/models/deletefileout.md - - docs/models/deletemodelout.md - - docs/models/deletemodelv1modelsmodeliddeleterequest.md - - docs/models/deltamessage.md - - docs/models/document.md - - docs/models/documentlibrarytool.md - - docs/models/documentlibrarytooltype.md - - docs/models/documentout.md - - docs/models/documenttextcontent.md - - docs/models/documentupdatein.md - - docs/models/documenturlchunk.md - - docs/models/documenturlchunktype.md 
- - docs/models/embeddingdtype.md - - docs/models/embeddingrequest.md - - docs/models/embeddingrequestinputs.md - - docs/models/embeddingresponse.md - - docs/models/embeddingresponsedata.md - - docs/models/entitytype.md - - docs/models/entries.md - - docs/models/eventout.md - - docs/models/file.md - - docs/models/filechunk.md - - docs/models/filepurpose.md - - docs/models/filesapiroutesdeletefilerequest.md - - docs/models/filesapiroutesdownloadfilerequest.md - - docs/models/filesapiroutesgetsignedurlrequest.md - - docs/models/filesapirouteslistfilesrequest.md - - docs/models/filesapiroutesretrievefilerequest.md - - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - - docs/models/fileschema.md - - docs/models/filesignedurl.md - - docs/models/fimcompletionrequest.md - - docs/models/fimcompletionrequeststop.md - - docs/models/fimcompletionresponse.md - - docs/models/fimcompletionstreamrequest.md - - docs/models/fimcompletionstreamrequeststop.md - - docs/models/finetuneablemodeltype.md - - docs/models/finishreason.md - - docs/models/ftclassifierlossfunction.md - - docs/models/ftmodelcapabilitiesout.md - - docs/models/ftmodelcard.md - - docs/models/ftmodelcardtype.md - - docs/models/function.md - - docs/models/functioncall.md - - docs/models/functioncallentry.md - - docs/models/functioncallentryarguments.md - - docs/models/functioncallentryobject.md - - docs/models/functioncallentrytype.md - - docs/models/functioncallevent.md - - docs/models/functioncalleventtype.md - - docs/models/functionname.md - - docs/models/functionresultentry.md - - docs/models/functionresultentryobject.md - - docs/models/functionresultentrytype.md - - docs/models/functiontool.md - - docs/models/functiontooltype.md - - docs/models/githubrepositoryin.md - - docs/models/githubrepositoryintype.md - - docs/models/githubrepositoryout.md - - docs/models/githubrepositoryouttype.md - - docs/models/handoffexecution.md - - docs/models/httpvalidationerror.md - - docs/models/hyperparameters.md - - 
docs/models/imagegenerationtool.md - - docs/models/imagegenerationtooltype.md - - docs/models/imageurl.md - - docs/models/imageurlchunk.md - - docs/models/imageurlchunkimageurl.md - - docs/models/imageurlchunktype.md - - docs/models/inputentries.md - - docs/models/inputs.md - - docs/models/instructrequest.md - - docs/models/instructrequestinputs.md - - docs/models/instructrequestinputsmessages.md - - docs/models/instructrequestmessages.md - - docs/models/integrations.md - - docs/models/jobin.md - - docs/models/jobinintegrations.md - - docs/models/jobinrepositories.md - - docs/models/jobmetadataout.md - - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md - - docs/models/jobsapiroutesbatchgetbatchjobrequest.md - - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md - - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md - - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md - - docs/models/jobsout.md - - docs/models/jobsoutdata.md - - docs/models/jobsoutobject.md - - docs/models/jobtype.md - - docs/models/jsonschema.md - - docs/models/legacyjobmetadataout.md - - docs/models/legacyjobmetadataoutobject.md - - docs/models/librariesdeletev1request.md - - docs/models/librariesdocumentsdeletev1request.md - - 
docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md - - docs/models/librariesdocumentsgetsignedurlv1request.md - - docs/models/librariesdocumentsgetstatusv1request.md - - docs/models/librariesdocumentsgettextcontentv1request.md - - docs/models/librariesdocumentsgetv1request.md - - docs/models/librariesdocumentslistv1request.md - - docs/models/librariesdocumentsreprocessv1request.md - - docs/models/librariesdocumentsupdatev1request.md - - docs/models/librariesdocumentsuploadv1documentupload.md - - docs/models/librariesdocumentsuploadv1request.md - - docs/models/librariesgetv1request.md - - docs/models/librariessharecreatev1request.md - - docs/models/librariessharedeletev1request.md - - docs/models/librariessharelistv1request.md - - docs/models/librariesupdatev1request.md - - docs/models/libraryin.md - - docs/models/libraryinupdate.md - - docs/models/libraryout.md - - docs/models/listdocumentout.md - - docs/models/listfilesout.md - - docs/models/listlibraryout.md - - docs/models/listsharingout.md - - docs/models/loc.md - - docs/models/messageentries.md - - docs/models/messageinputcontentchunks.md - - docs/models/messageinputentry.md - - docs/models/messageinputentrycontent.md - - docs/models/messageinputentryrole.md - - docs/models/messageinputentrytype.md - - docs/models/messageoutputcontentchunks.md - - docs/models/messageoutputentry.md - - docs/models/messageoutputentrycontent.md - - docs/models/messageoutputentryobject.md - - docs/models/messageoutputentryrole.md - - docs/models/messageoutputentrytype.md - - docs/models/messageoutputevent.md - - docs/models/messageoutputeventcontent.md - - docs/models/messageoutputeventrole.md - - docs/models/messageoutputeventtype.md - - docs/models/messages.md - - docs/models/metadata.md - - docs/models/metricout.md - - docs/models/mistralpromptmode.md - - docs/models/modelcapabilities.md - - docs/models/modelconversation.md - - docs/models/modelconversationobject.md - - docs/models/modelconversationtools.md - - 
docs/models/modellist.md - - docs/models/modeltype.md - - docs/models/moderationobject.md - - docs/models/moderationresponse.md - - docs/models/object.md - - docs/models/ocrimageobject.md - - docs/models/ocrpagedimensions.md - - docs/models/ocrpageobject.md - - docs/models/ocrrequest.md - - docs/models/ocrresponse.md - - docs/models/ocrusageinfo.md - - docs/models/one.md - - docs/models/outputcontentchunks.md - - docs/models/outputs.md - - docs/models/paginationinfo.md - - docs/models/prediction.md - - docs/models/processingstatusout.md - - docs/models/queryparamstatus.md - - docs/models/referencechunk.md - - docs/models/referencechunktype.md - - docs/models/repositories.md - - docs/models/response1.md - - docs/models/responsebody.md - - docs/models/responsedoneevent.md - - docs/models/responsedoneeventtype.md - - docs/models/responseerrorevent.md - - docs/models/responseerroreventtype.md - - docs/models/responseformat.md - - docs/models/responseformats.md - - docs/models/responsestartedevent.md - - docs/models/responsestartedeventtype.md - - docs/models/retrievefileout.md - - docs/models/retrievemodelv1modelsmodelidgetrequest.md - - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md - - docs/models/role.md - - docs/models/sampletype.md - - docs/models/security.md - - docs/models/shareenum.md - - docs/models/sharingdelete.md - - docs/models/sharingin.md - - docs/models/sharingout.md - - docs/models/source.md - - docs/models/ssetypes.md - - docs/models/status.md - - docs/models/stop.md - - docs/models/systemmessage.md - - docs/models/systemmessagecontent.md - - docs/models/textchunk.md - - docs/models/textchunktype.md - - docs/models/thinkchunk.md - - docs/models/thinkchunktype.md - - docs/models/thinking.md - - docs/models/timestampgranularity.md - - docs/models/tool.md - - docs/models/toolcall.md - - docs/models/toolchoice.md - - docs/models/toolchoiceenum.md - - docs/models/toolexecutiondeltaevent.md - - 
docs/models/toolexecutiondeltaeventtype.md - - docs/models/toolexecutiondoneevent.md - - docs/models/toolexecutiondoneeventtype.md - - docs/models/toolexecutionentry.md - - docs/models/toolexecutionentryobject.md - - docs/models/toolexecutionentrytype.md - - docs/models/toolexecutionstartedevent.md - - docs/models/toolexecutionstartedeventtype.md - - docs/models/toolfilechunk.md - - docs/models/toolfilechunktype.md - - docs/models/toolmessage.md - - docs/models/toolmessagecontent.md - - docs/models/toolmessagerole.md - - docs/models/toolreferencechunk.md - - docs/models/toolreferencechunktype.md - - docs/models/tools.md - - docs/models/tooltypes.md - - docs/models/trainingfile.md - - docs/models/transcriptionresponse.md - - docs/models/transcriptionsegmentchunk.md - - docs/models/transcriptionstreamdone.md - - docs/models/transcriptionstreamdonetype.md - - docs/models/transcriptionstreamevents.md - - docs/models/transcriptionstreameventsdata.md - - docs/models/transcriptionstreameventtypes.md - - docs/models/transcriptionstreamlanguage.md - - docs/models/transcriptionstreamlanguagetype.md - - docs/models/transcriptionstreamsegmentdelta.md - - docs/models/transcriptionstreamsegmentdeltatype.md - - docs/models/transcriptionstreamtextdelta.md - - docs/models/transcriptionstreamtextdeltatype.md - - docs/models/two.md - - docs/models/type.md - - docs/models/unarchiveftmodelout.md - - docs/models/unarchiveftmodeloutobject.md - - docs/models/updateftmodelin.md - - docs/models/uploadfileout.md - - docs/models/usageinfo.md - - docs/models/usermessage.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/utils/retryconfig.md - - docs/models/validationerror.md - - docs/models/wandbintegration.md - - docs/models/wandbintegrationout.md - - docs/models/wandbintegrationouttype.md - - docs/models/wandbintegrationtype.md - - docs/models/websearchpremiumtool.md - - docs/models/websearchpremiumtooltype.md - - docs/models/websearchtool.md - - 
docs/models/websearchtooltype.md - - docs/sdks/accesses/README.md - - docs/sdks/agents/README.md - - docs/sdks/audio/README.md - - docs/sdks/batch/README.md - - docs/sdks/beta/README.md - - docs/sdks/chat/README.md - - docs/sdks/classifiers/README.md - - docs/sdks/conversations/README.md - - docs/sdks/documents/README.md - - docs/sdks/embeddings/README.md - - docs/sdks/files/README.md - - docs/sdks/fim/README.md - - docs/sdks/finetuning/README.md - - docs/sdks/jobs/README.md - - docs/sdks/libraries/README.md - - docs/sdks/mistral/README.md - - docs/sdks/mistralagents/README.md - - docs/sdks/mistraljobs/README.md - - docs/sdks/models/README.md - - docs/sdks/ocr/README.md - - docs/sdks/transcriptions/README.md - - poetry.toml - - py.typed - - scripts/prepare_readme.py - - scripts/publish.sh - - src/mistralai/__init__.py - - src/mistralai/_hooks/__init__.py - - src/mistralai/_hooks/sdkhooks.py - - src/mistralai/_hooks/types.py - - src/mistralai/_version.py - - src/mistralai/accesses.py - - src/mistralai/agents.py - - src/mistralai/audio.py - - src/mistralai/basesdk.py - - src/mistralai/batch.py - - src/mistralai/beta.py - - src/mistralai/chat.py - - src/mistralai/classifiers.py - - src/mistralai/conversations.py - - src/mistralai/documents.py - - src/mistralai/embeddings.py - - src/mistralai/files.py - - src/mistralai/fim.py - - src/mistralai/fine_tuning.py - - src/mistralai/httpclient.py - - src/mistralai/jobs.py - - src/mistralai/libraries.py - - src/mistralai/mistral_agents.py - - src/mistralai/mistral_jobs.py - - src/mistralai/models/__init__.py - - src/mistralai/models/agent.py - - src/mistralai/models/agentconversation.py - - src/mistralai/models/agentcreationrequest.py - - src/mistralai/models/agenthandoffdoneevent.py - - src/mistralai/models/agenthandoffentry.py - - src/mistralai/models/agenthandoffstartedevent.py - - src/mistralai/models/agents_api_v1_agents_getop.py - - src/mistralai/models/agents_api_v1_agents_listop.py - - 
src/mistralai/models/agents_api_v1_agents_update_versionop.py - - src/mistralai/models/agents_api_v1_agents_updateop.py - - src/mistralai/models/agents_api_v1_conversations_append_streamop.py - - src/mistralai/models/agents_api_v1_conversations_appendop.py - - src/mistralai/models/agents_api_v1_conversations_getop.py - - src/mistralai/models/agents_api_v1_conversations_historyop.py - - src/mistralai/models/agents_api_v1_conversations_listop.py - - src/mistralai/models/agents_api_v1_conversations_messagesop.py - - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py - - src/mistralai/models/agents_api_v1_conversations_restartop.py - - src/mistralai/models/agentscompletionrequest.py - - src/mistralai/models/agentscompletionstreamrequest.py - - src/mistralai/models/agentupdaterequest.py - - src/mistralai/models/apiendpoint.py - - src/mistralai/models/archiveftmodelout.py - - src/mistralai/models/assistantmessage.py - - src/mistralai/models/audiochunk.py - - src/mistralai/models/audiotranscriptionrequest.py - - src/mistralai/models/audiotranscriptionrequeststream.py - - src/mistralai/models/basemodelcard.py - - src/mistralai/models/batcherror.py - - src/mistralai/models/batchjobin.py - - src/mistralai/models/batchjobout.py - - src/mistralai/models/batchjobsout.py - - src/mistralai/models/batchjobstatus.py - - src/mistralai/models/builtinconnectors.py - - src/mistralai/models/chatclassificationrequest.py - - src/mistralai/models/chatcompletionchoice.py - - src/mistralai/models/chatcompletionrequest.py - - src/mistralai/models/chatcompletionresponse.py - - src/mistralai/models/chatcompletionstreamrequest.py - - src/mistralai/models/chatmoderationrequest.py - - src/mistralai/models/checkpointout.py - - src/mistralai/models/classificationrequest.py - - src/mistralai/models/classificationresponse.py - - src/mistralai/models/classificationtargetresult.py - - src/mistralai/models/classifierdetailedjobout.py - - src/mistralai/models/classifierftmodelout.py - - 
src/mistralai/models/classifierjobout.py - - src/mistralai/models/classifiertargetin.py - - src/mistralai/models/classifiertargetout.py - - src/mistralai/models/classifiertrainingparameters.py - - src/mistralai/models/classifiertrainingparametersin.py - - src/mistralai/models/codeinterpretertool.py - - src/mistralai/models/completionargs.py - - src/mistralai/models/completionargsstop.py - - src/mistralai/models/completionchunk.py - - src/mistralai/models/completiondetailedjobout.py - - src/mistralai/models/completionevent.py - - src/mistralai/models/completionftmodelout.py - - src/mistralai/models/completionjobout.py - - src/mistralai/models/completionresponsestreamchoice.py - - src/mistralai/models/completiontrainingparameters.py - - src/mistralai/models/completiontrainingparametersin.py - - src/mistralai/models/contentchunk.py - - src/mistralai/models/conversationappendrequest.py - - src/mistralai/models/conversationappendstreamrequest.py - - src/mistralai/models/conversationevents.py - - src/mistralai/models/conversationhistory.py - - src/mistralai/models/conversationinputs.py - - src/mistralai/models/conversationmessages.py - - src/mistralai/models/conversationrequest.py - - src/mistralai/models/conversationresponse.py - - src/mistralai/models/conversationrestartrequest.py - - src/mistralai/models/conversationrestartstreamrequest.py - - src/mistralai/models/conversationstreamrequest.py - - src/mistralai/models/conversationusageinfo.py - - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - - src/mistralai/models/deletefileout.py - - src/mistralai/models/deletemodelout.py - - src/mistralai/models/deltamessage.py - - src/mistralai/models/documentlibrarytool.py - - src/mistralai/models/documentout.py - - src/mistralai/models/documenttextcontent.py - - src/mistralai/models/documentupdatein.py - - src/mistralai/models/documenturlchunk.py - - src/mistralai/models/embeddingdtype.py - - src/mistralai/models/embeddingrequest.py - - 
src/mistralai/models/embeddingresponse.py - - src/mistralai/models/embeddingresponsedata.py - - src/mistralai/models/entitytype.py - - src/mistralai/models/eventout.py - - src/mistralai/models/file.py - - src/mistralai/models/filechunk.py - - src/mistralai/models/filepurpose.py - - src/mistralai/models/files_api_routes_delete_fileop.py - - src/mistralai/models/files_api_routes_download_fileop.py - - src/mistralai/models/files_api_routes_get_signed_urlop.py - - src/mistralai/models/files_api_routes_list_filesop.py - - src/mistralai/models/files_api_routes_retrieve_fileop.py - - src/mistralai/models/files_api_routes_upload_fileop.py - - src/mistralai/models/fileschema.py - - src/mistralai/models/filesignedurl.py - - src/mistralai/models/fimcompletionrequest.py - - src/mistralai/models/fimcompletionresponse.py - - src/mistralai/models/fimcompletionstreamrequest.py - - src/mistralai/models/finetuneablemodeltype.py - - src/mistralai/models/ftclassifierlossfunction.py - - src/mistralai/models/ftmodelcapabilitiesout.py - - src/mistralai/models/ftmodelcard.py - - src/mistralai/models/function.py - - src/mistralai/models/functioncall.py - - src/mistralai/models/functioncallentry.py - - src/mistralai/models/functioncallentryarguments.py - - src/mistralai/models/functioncallevent.py - - src/mistralai/models/functionname.py - - src/mistralai/models/functionresultentry.py - - src/mistralai/models/functiontool.py - - src/mistralai/models/githubrepositoryin.py - - src/mistralai/models/githubrepositoryout.py - - src/mistralai/models/httpvalidationerror.py - - src/mistralai/models/imagegenerationtool.py - - src/mistralai/models/imageurl.py - - src/mistralai/models/imageurlchunk.py - - src/mistralai/models/inputentries.py - - src/mistralai/models/inputs.py - - src/mistralai/models/instructrequest.py - - src/mistralai/models/jobin.py - - src/mistralai/models/jobmetadataout.py - - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py - - 
src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py - - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py - - src/mistralai/models/jobsout.py - - src/mistralai/models/jsonschema.py - - src/mistralai/models/legacyjobmetadataout.py - - src/mistralai/models/libraries_delete_v1op.py - - src/mistralai/models/libraries_documents_delete_v1op.py - - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py - - src/mistralai/models/libraries_documents_get_signed_url_v1op.py - - src/mistralai/models/libraries_documents_get_status_v1op.py - - src/mistralai/models/libraries_documents_get_text_content_v1op.py - - src/mistralai/models/libraries_documents_get_v1op.py - - src/mistralai/models/libraries_documents_list_v1op.py - - src/mistralai/models/libraries_documents_reprocess_v1op.py - - src/mistralai/models/libraries_documents_update_v1op.py - - src/mistralai/models/libraries_documents_upload_v1op.py - - src/mistralai/models/libraries_get_v1op.py - - src/mistralai/models/libraries_share_create_v1op.py - - src/mistralai/models/libraries_share_delete_v1op.py - - src/mistralai/models/libraries_share_list_v1op.py - - src/mistralai/models/libraries_update_v1op.py - - src/mistralai/models/libraryin.py - - src/mistralai/models/libraryinupdate.py - - src/mistralai/models/libraryout.py - - 
src/mistralai/models/listdocumentout.py - - src/mistralai/models/listfilesout.py - - src/mistralai/models/listlibraryout.py - - src/mistralai/models/listsharingout.py - - src/mistralai/models/messageentries.py - - src/mistralai/models/messageinputcontentchunks.py - - src/mistralai/models/messageinputentry.py - - src/mistralai/models/messageoutputcontentchunks.py - - src/mistralai/models/messageoutputentry.py - - src/mistralai/models/messageoutputevent.py - - src/mistralai/models/metricout.py - - src/mistralai/models/mistralpromptmode.py - - src/mistralai/models/modelcapabilities.py - - src/mistralai/models/modelconversation.py - - src/mistralai/models/modellist.py - - src/mistralai/models/moderationobject.py - - src/mistralai/models/moderationresponse.py - - src/mistralai/models/ocrimageobject.py - - src/mistralai/models/ocrpagedimensions.py - - src/mistralai/models/ocrpageobject.py - - src/mistralai/models/ocrrequest.py - - src/mistralai/models/ocrresponse.py - - src/mistralai/models/ocrusageinfo.py - - src/mistralai/models/outputcontentchunks.py - - src/mistralai/models/paginationinfo.py - - src/mistralai/models/prediction.py - - src/mistralai/models/processingstatusout.py - - src/mistralai/models/referencechunk.py - - src/mistralai/models/responsedoneevent.py - - src/mistralai/models/responseerrorevent.py - - src/mistralai/models/responseformat.py - - src/mistralai/models/responseformats.py - - src/mistralai/models/responsestartedevent.py - - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - - src/mistralai/models/retrievefileout.py - - src/mistralai/models/sampletype.py - - src/mistralai/models/sdkerror.py - - src/mistralai/models/security.py - - src/mistralai/models/shareenum.py - - src/mistralai/models/sharingdelete.py - - src/mistralai/models/sharingin.py - - src/mistralai/models/sharingout.py - - src/mistralai/models/source.py - - src/mistralai/models/ssetypes.py - - src/mistralai/models/systemmessage.py - - src/mistralai/models/textchunk.py 
- - src/mistralai/models/thinkchunk.py - - src/mistralai/models/timestampgranularity.py - - src/mistralai/models/tool.py - - src/mistralai/models/toolcall.py - - src/mistralai/models/toolchoice.py - - src/mistralai/models/toolchoiceenum.py - - src/mistralai/models/toolexecutiondeltaevent.py - - src/mistralai/models/toolexecutiondoneevent.py - - src/mistralai/models/toolexecutionentry.py - - src/mistralai/models/toolexecutionstartedevent.py - - src/mistralai/models/toolfilechunk.py - - src/mistralai/models/toolmessage.py - - src/mistralai/models/toolreferencechunk.py - - src/mistralai/models/tooltypes.py - - src/mistralai/models/trainingfile.py - - src/mistralai/models/transcriptionresponse.py - - src/mistralai/models/transcriptionsegmentchunk.py - - src/mistralai/models/transcriptionstreamdone.py - - src/mistralai/models/transcriptionstreamevents.py - - src/mistralai/models/transcriptionstreameventtypes.py - - src/mistralai/models/transcriptionstreamlanguage.py - - src/mistralai/models/transcriptionstreamsegmentdelta.py - - src/mistralai/models/transcriptionstreamtextdelta.py - - src/mistralai/models/unarchiveftmodelout.py - - src/mistralai/models/updateftmodelin.py - - src/mistralai/models/uploadfileout.py - - src/mistralai/models/usageinfo.py - - src/mistralai/models/usermessage.py - - src/mistralai/models/validationerror.py - - src/mistralai/models/wandbintegration.py - - src/mistralai/models/wandbintegrationout.py - - src/mistralai/models/websearchpremiumtool.py - - src/mistralai/models/websearchtool.py - - src/mistralai/models_.py - - src/mistralai/ocr.py - - src/mistralai/py.typed - - src/mistralai/sdk.py - - src/mistralai/sdkconfiguration.py - - src/mistralai/transcriptions.py - - src/mistralai/types/__init__.py - - src/mistralai/types/basemodel.py - - src/mistralai/utils/__init__.py - - src/mistralai/utils/annotations.py - - src/mistralai/utils/datetimes.py - - src/mistralai/utils/enums.py - - src/mistralai/utils/eventstreaming.py - - 
src/mistralai/utils/forms.py - - src/mistralai/utils/headers.py - - src/mistralai/utils/logger.py - - src/mistralai/utils/metadata.py - - src/mistralai/utils/queryparams.py - - src/mistralai/utils/requestbodies.py - - src/mistralai/utils/retries.py - - src/mistralai/utils/security.py - - src/mistralai/utils/serializers.py - - src/mistralai/utils/url.py - - src/mistralai/utils/values.py +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + deleted: true + USAGE.md: + id: 3aed33ce6e6f + last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 + pristine_git_object: f71bbabc223b8cef8d923816fce8d572f3901884 + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/agent.md: + id: ffdbb4c53c87 + last_write_checksum: sha1:c87b05a17785cd83fdfc58cb2d55b6d77d3bc23e + pristine_git_object: 4de5a901d120b85ba5940490a2ec3fd4f1a91136 + docs/models/agentaliasresponse.md: + id: 5ac4721d8947 + last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 + pristine_git_object: aa531ec5d1464f95e3938f148c1e88efc30fa6a6 + docs/models/agentconversation.md: + id: 3590c1a566fa + last_write_checksum: sha1:43e7c1ed2b43aca2794d89f2e6d6aa5f1478cc3e + pristine_git_object: 451f6fb8f700dddd54c69593c316bf562b5cbc93 + docs/models/agentconversationagentversion.md: + id: 468e0d1614bb + last_write_checksum: sha1:6e60bf4a18d791d694e90c89bdb8cc38e43c324b + pristine_git_object: 668a8dc0f0c51a231a73aed51b2db13de243a038 + docs/models/agenthandoffdoneevent.md: + id: dcf166a3c3b0 + last_write_checksum: 
sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e + pristine_git_object: 6bfcc3d83457edf05d0f13957d34ead0f260599b + docs/models/agenthandoffentry.md: + id: 39d54f489b84 + last_write_checksum: sha1:a93a604ced2303eb6f93cfe0f1360224d3298b37 + pristine_git_object: 2b689ec720c02b7289ec462d7acca64a82b23570 + docs/models/agenthandoffstartedevent.md: + id: b620102af460 + last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2 + pristine_git_object: 518b5a0c4521ec55a5a28ba3ef0ad1c1fce52792 + docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: + id: c09ec9946094 + last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f + pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb + docs/models/agentsapiv1agentsdeletealiasrequest.md: + id: 429307ab315d + last_write_checksum: sha1:8e0a8388bb51c234aa1eb5566cb68389ebe57574 + pristine_git_object: 8e95c0c31e8ac92b374c153d622d7806b9e59a8d + docs/models/agentsapiv1agentsdeleterequest.md: + id: 0faaaa59add9 + last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 + pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c + docs/models/agentsapiv1agentsgetagentversion.md: + id: 3316961b40c4 + last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59 + pristine_git_object: 7fb9f2d578c4901ca1b41aaada6acc3a5ee94fa1 + docs/models/agentsapiv1agentsgetrequest.md: + id: 01740ae62cff + last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194 + pristine_git_object: ceffe0096ffd6db97a6018d34870c29cec4fb0d3 + docs/models/agentsapiv1agentsgetversionrequest.md: + id: 88ed22b85cde + last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a + pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d + docs/models/agentsapiv1agentslistrequest.md: + id: c2720c209527 + last_write_checksum: sha1:99502da34d868f1563ad1e3ea256f3becdbefa11 + pristine_git_object: 4785a54c561f5f9e1eb7ffd3317c5faa9b8b56dd + docs/models/agentsapiv1agentslistversionaliasesrequest.md: + id: 69c8bce2c017 
+ last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba + pristine_git_object: 3083bf92641404738948cd57306eac978b701551 + docs/models/agentsapiv1agentslistversionsrequest.md: + id: 0bc44ed8d6bb + last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 + pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca + docs/models/agentsapiv1agentsupdaterequest.md: + id: 7692812cd677 + last_write_checksum: sha1:aaccaa13eeb0d775b0c6a0b23c328d3f3c2c2dbf + pristine_git_object: 7ef60becfcdde09c8ce0366361306c5661d67e24 + docs/models/agentsapiv1agentsupdateversionrequest.md: + id: a001251b1624 + last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f + pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac + docs/models/agentsapiv1conversationsappendrequest.md: + id: 70f76380e810 + last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a + pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3 + docs/models/agentsapiv1conversationsappendstreamrequest.md: + id: f6ada9a592c5 + last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab + pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0 + docs/models/agentsapiv1conversationsdeleterequest.md: + id: c2c9f084ed93 + last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8 + pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a + docs/models/agentsapiv1conversationsgetrequest.md: + id: d6acce23f92c + last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf + pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e + docs/models/agentsapiv1conversationshistoryrequest.md: + id: e3efc36ea8b5 + last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be + pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0 + docs/models/agentsapiv1conversationslistrequest.md: + id: 406c3e92777a + last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 + pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 
+ docs/models/agentsapiv1conversationslistresponse.md: + id: 394c37d2203f + last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859 + pristine_git_object: b233ee203ff5da0c65d6e9f87b2925d6802d2c0a + docs/models/agentsapiv1conversationsmessagesrequest.md: + id: 2c749c6620d4 + last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 + pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649 + docs/models/agentsapiv1conversationsrestartrequest.md: + id: 6955883f9a44 + last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9 + pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4 + docs/models/agentsapiv1conversationsrestartstreamrequest.md: + id: 0c39856fd70e + last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178 + pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d + docs/models/agentscompletionrequest.md: + id: 906b82c214dc + last_write_checksum: sha1:b5685a779b633823ccfe99d9740078e0aab50bde + pristine_git_object: 33435732b94c81c7bccff5cf1868b2f382223200 + docs/models/agentscompletionrequestmessage.md: + id: 5337f0644b40 + last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb + pristine_git_object: 957703b528d3da6f57576064d7cb9b2af63c362a + docs/models/agentscompletionrequeststop.md: + id: ad1e0e74b6b8 + last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9 + pristine_git_object: 21ce6fb539238168e6d1dfc5a8206d55d33018d3 + docs/models/agentscompletionrequesttoolchoice.md: + id: bd8a6f9fbb47 + last_write_checksum: sha1:f3d9ec3c82b6bbd2c3cbc320a71b927edcc292b1 + pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76 + docs/models/agentscompletionstreamrequest.md: + id: 21d09756447b + last_write_checksum: sha1:9d506ac8f620f4cef54b4b7a1891fb17b8eaefa5 + pristine_git_object: 407be8e0c1264a31cc0d80c1059f3bd62c2eaceb + docs/models/agentscompletionstreamrequestmessage.md: + id: b309ade92081 + last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb + 
pristine_git_object: 6ccf4244a709de7bedbf75042efb935129a6ca01 + docs/models/agentscompletionstreamrequeststop.md: + id: 4925b6b8fbca + last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20 + pristine_git_object: 981005f3ff2277eae57c56787edb5f1f62d1fe46 + docs/models/agentscompletionstreamrequesttoolchoice.md: + id: b1f76f7a4e1c + last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe + pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced + docs/models/agenttool.md: + id: 513b8b7bc0b7 + last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 + pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5 + docs/models/apiendpoint.md: + id: be613fd9b947 + last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 + pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be + docs/models/archivemodelresponse.md: + id: 133f4af8058f + last_write_checksum: sha1:95fa73ebd765cbd244c847218df6d31e18dc5e85 + pristine_git_object: 276656d1d00ca174e78aa9102f7f576575daa818 + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/attributes.md: + id: ececf40457de + last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef + pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050 + docs/models/audiochunk.md: + id: 88315a758fd4 + last_write_checksum: sha1:b47b295122cea28d66212d75a1f0eccd70a248cc + pristine_git_object: 1ba8b0f578fa94b4f8dddf559798e033a1704e7b + docs/models/audioencoding.md: + id: 
1e0dfee9c2a0 + last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 + pristine_git_object: feec8c71bf5a89a5c0099a9d075bc2bd36dd5f73 + docs/models/audioformat.md: + id: 41973dd397de + last_write_checksum: sha1:b81fbaf4f8aa03937c91f76d371ad5860836a772 + pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 + docs/models/audiotranscriptionrequest.md: + id: ebf59641bc84 + last_write_checksum: sha1:a478d0656a0f69d4c426e548e2236b99730e2084 + pristine_git_object: 80bd53015ddee1bcecc7aeecc75152a19afc22c1 + docs/models/audiotranscriptionrequeststream.md: + id: 79b5f721b753 + last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc + pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e + docs/models/basemodelcard.md: + id: 2f62bfbd650e + last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4 + pristine_git_object: 0f42504fd6446c0baf4686bfbb8481658b6789cd + docs/models/batcherror.md: + id: 8053e29a3f26 + last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f + pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6 + docs/models/batchjob.md: + id: de2a00d0f739 + last_write_checksum: sha1:1160822c4032e1745dfaf37abcac02e78cbc4fb4 + pristine_git_object: 162e2cff3a1132f2b89e57dcf1bf8b4c403b6453 + docs/models/batchjobstatus.md: + id: 7e6f034d3c91 + last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc + pristine_git_object: 64617b31488130f94bf47952ccaa4958670473c8 + docs/models/batchrequest.md: + id: b113ca846594 + last_write_checksum: sha1:f9dc702c27b8257e008390519df744290e09c4b4 + pristine_git_object: 6ee3b394a8b1125769a355359b5a44bc7c3224ea + docs/models/builtinconnectors.md: + id: 9d14e972f08a + last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c + pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 + docs/models/chatclassificationrequest.md: + id: 57b86771c870 + last_write_checksum: sha1:bfd2fb8e2c83578ca0cea5209ea3f18c3bcd2ae5 + pristine_git_object: 
ba9c95eab2c1e4f080e39e8804a5de222e052ee6 + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:4980b698006c641b1c84495c5b601cc8662b05f6 + pristine_git_object: 921161faf38b2f4d4648d6d744c08a96ed38f0a6 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:c54d4a32d0d65533b79c381174690e9b735b2800 + pristine_git_object: 8761f000d4249de86265bc63da785cd807c2e7a5 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: 
a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/chatmoderationrequest.md: + id: 22862d4d20ec + last_write_checksum: sha1:9bbe510ee67515092bd953ad7f84ae118398af54 + pristine_git_object: f252482db0e404e21a61aafba0d09d9561610c11 + docs/models/chatmoderationrequestinputs1.md: + id: 89311e3e440d + last_write_checksum: sha1:8d4c2dbd9207589aabf9c00cf60c61d2d3eef452 + pristine_git_object: e15b8a844110fae68c02da040cd0122be5afc09a + docs/models/chatmoderationrequestinputs2.md: + id: 4daa876da841 + last_write_checksum: sha1:e34eb6557e06e7783ed14d959c2a29959c26fd4c + pristine_git_object: f40a4ebe0780c493e8bd7a322aec31893669a181 + docs/models/chatmoderationrequestinputs3.md: + id: aec173bca43b + last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 + pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3 + docs/models/checkpoint.md: + id: 9c97119961cf + last_write_checksum: sha1:0e7732d9c30f67d59fe4d9ad1d165ad0cd80c790 + pristine_git_object: f7f35530c0d57aca02b2503e968a9a262bb1a10d + docs/models/classificationrequest.md: + id: 6f79e905a3fa + last_write_checksum: sha1:3e083210e1cfdd3539e714928688648673767ae8 + pristine_git_object: 99cdc4a0863577d523e8921af31a179f109bc9fb + docs/models/classificationrequestinputs.md: + id: aff99510c85a + last_write_checksum: sha1:c4b52dd83924f56bef1f54c4fbbdf3cd62e96dbe + pristine_git_object: 69d75d11276f6101452a9debfa2cbcdd39333849 + docs/models/classificationresponse.md: + id: 21227dec49f2 + last_write_checksum: sha1:56756a6c0c36ce94653b676eba1f648907a87a79 + pristine_git_object: d1633ae779850cba0eac4a9c26b5b776a7b789e0 + docs/models/classificationtargetresult.md: + id: 97a5eab5eb54 + last_write_checksum: sha1:41269d1372be3523f46cb57bd19292af4971f7c0 + pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 
+ docs/models/classifierfinetunedmodel.md: + id: b67a370e0ef1 + last_write_checksum: sha1:5fe3c26e337083716dd823e861924a03c55ce293 + pristine_git_object: ad05f93147d6904ee62602480c24644ec5e4cf63 + docs/models/classifierfinetuningjob.md: + id: 5bf35c25183f + last_write_checksum: sha1:afedddfe38e217189b5ec12ded74606c3b1e4c59 + pristine_git_object: 369756ba16a4c64f03cb6bb5da9bc0abd2a8eac6 + docs/models/classifierfinetuningjobdetails.md: + id: c91d53e010d5 + last_write_checksum: sha1:59a4c11a0d52b02ffc48e011a40fb4ebb1604825 + pristine_git_object: c5efdf1c817b978506a4862991a0f8eab8b219fb + docs/models/classifierfinetuningjobdetailsintegration.md: + id: e6c161ac2a44 + last_write_checksum: sha1:6450686e7f92ac8c1c02fcea82d5855ca6738b46 + pristine_git_object: 438a35d9eb0e4250a9e6bcbb7dafeb26d74e018a + docs/models/classifierfinetuningjobdetailsstatus.md: + id: 87737e85b845 + last_write_checksum: sha1:2ff02df3efee0f9b5867045d43fc71025fb37129 + pristine_git_object: 058c65832188f7148d96ab320114d984d618efa1 + docs/models/classifierfinetuningjobintegration.md: + id: 91de20176a8c + last_write_checksum: sha1:e49a7c082300eb4d3106e96b21ebc6860060b8c3 + pristine_git_object: 820aee4c6fcf899341d869d796b1a61d4d4eab42 + docs/models/classifierfinetuningjobstatus.md: + id: e3c4e672dc88 + last_write_checksum: sha1:1bfd306ab633d3ea73272e56796c1f63843fce22 + pristine_git_object: ca829885de056c5ccafec0fe3a901743e56deb0c + docs/models/classifiertarget.md: + id: 4c5c0b3e0bc7 + last_write_checksum: sha1:ad16823def0acb267543c4189df32406a27685aa + pristine_git_object: f8c99e2e7e6653d0e809506861ec4c25571cb5c9 + docs/models/classifiertargetresult.md: + id: c78d27aec276 + last_write_checksum: sha1:17c37c10385019953d6085fff6681808f950693f + pristine_git_object: ccadc623493bfa946dc2cccf894364b1e6b8b452 + docs/models/classifiertrainingparameters.md: + id: 9370e1ccd3d5 + last_write_checksum: sha1:03f7c32717792966afdec50cb9dc1c85bb99dd84 + pristine_git_object: 3b6f3be6942bbcf56261f773864a518d16923880 + 
docs/models/codeinterpretertool.md: + id: f009740c6e54 + last_write_checksum: sha1:a2114d61a98a48b4365a77c0c75c06ca834367ad + pristine_git_object: 6302fc627d7c49442b6c9aec19c70fdceaf7c519 + docs/models/completionargs.md: + id: 3b54534f9830 + last_write_checksum: sha1:7432daccf23d8963a65fa4f2b103ea0396fbfbeb + pristine_git_object: 148f760859636e8c32259604698785663491a445 + docs/models/completionargsstop.md: + id: 40b0f0c81dc8 + last_write_checksum: sha1:2a576618c62d4818af0048ed3a79080149a88642 + pristine_git_object: b93f993e44a18fb0f3711163277f538cfedbf828 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionfinetunedmodel.md: + id: 23a7705a9c89 + last_write_checksum: sha1:50d173b7505a97435c9d7ccb4fa99af04a51c6a2 + pristine_git_object: 0055db021f1c039c84cf7cfecd654683d2f9996f + docs/models/completionfinetuningjob.md: + id: 13c69dd18690 + last_write_checksum: sha1:b77e82f00f851034999986ff67aea5b0b558fbd2 + pristine_git_object: 83c0ae7e551e1f70df8dad4dce75ad20fe2b7ae7 + docs/models/completionfinetuningjobdetails.md: + id: b285f80afd59 + last_write_checksum: sha1:6ced5483d8249d7e8f502ec3f53f45d76e348003 + pristine_git_object: 3c54e874bcd968a9d5d9c8b3285632ba71364763 + docs/models/completionfinetuningjobdetailsintegration.md: + id: 27662795c95f + last_write_checksum: sha1:655f03341ad1b590ec451288607cec61024bfefc + pristine_git_object: 38f6a34963db4a653ec7aa7f0c85b68e837ebafc + docs/models/completionfinetuningjobdetailsrepository.md: + id: 023920eecc9e + last_write_checksum: sha1:2b8ba6ff115fda4cc6ed74825fb09b9500d915f6 + pristine_git_object: c6bd67cde1d1628aa3efc4a53fa8487a009aa129 + 
docs/models/completionfinetuningjobdetailsstatus.md: + id: b1b717a4e256 + last_write_checksum: sha1:97c8699f0979978ea4320da3388e18da6219cb87 + pristine_git_object: 94d795a9ba4ec743f274d4ab5666e8897d174c61 + docs/models/completionfinetuningjobintegration.md: + id: 392ffc2cdef2 + last_write_checksum: sha1:53540da44e0edbad5d4085f81ded159dbc582a6c + pristine_git_object: dbe57417d78f1de798c6eaea7e56984e3b002cb9 + docs/models/completionfinetuningjobrepository.md: + id: deb47b72e8e4 + last_write_checksum: sha1:c0fd43a01c2f763c7945311741ee3c2b9c7520f6 + pristine_git_object: 54225e27204b703a6b33d2d66492e272559c3b3c + docs/models/completionfinetuningjobstatus.md: + id: 2ac420312815 + last_write_checksum: sha1:90f498cb04e89e8f4a424762c07231fd9030b326 + pristine_git_object: db151a1bd871a2bf231424a78c8c450b2a488099 + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/completiontrainingparameters.md: + id: b716b0195d39 + last_write_checksum: sha1:1d8d7c469f933ea741ec15c8b9ef8b986e0ca95e + pristine_git_object: 4746a95df18c78331f572425a16b2b3dcbc2df4c + docs/models/confirmation.md: + id: 19b9e48a3c2e + last_write_checksum: sha1:eb6494cb19f23c6df62afb009cc88ce38d24af86 + pristine_git_object: fd6e6aaa58cabba0cdec1b76ac50fb6e46f91b07 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c + pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac + docs/models/conversationappendrequest.md: + id: 722746e5065c + last_write_checksum: sha1:c8a4a49f0a1fe5cdd2ef6264ef9c600cfc8f7beb + pristine_git_object: 78a96508e4e1c6f83de4556d0bfa3b10c875da37 + 
docs/models/conversationappendrequesthandoffexecution.md: + id: e3f56d558037 + last_write_checksum: sha1:dc71c8db746bb08f6630e995cf6af9fda747e954 + pristine_git_object: 7418b36a55fab959639aec456a946600eb908efb + docs/models/conversationappendstreamrequest.md: + id: e9f8131435e8 + last_write_checksum: sha1:3afe7eaafbf61abcd9341ee8fbca5c6d0c2db0ab + pristine_git_object: daea9c522a8a0693edce11b1bbeca1f2cba0781e + docs/models/conversationappendstreamrequesthandoffexecution.md: + id: 5739ea777905 + last_write_checksum: sha1:c85584b63c0c5d859ee5d46d6ae167a8ee44e279 + pristine_git_object: 1bbced3e61a521401ae93a7b1f73d0e9c061e5fd + docs/models/conversationevents.md: + id: be63cc7c526e + last_write_checksum: sha1:1667c767ef53fd7aef90452fde2a8245ed2b2ae6 + pristine_git_object: f1e2c4e90181ff729d3fdb37b0135e9bbd095c04 + docs/models/conversationeventsdata.md: + id: d4907b066f4b + last_write_checksum: sha1:f58b7f3e738c2d0146b228076a5dc0c6cf84ffb1 + pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3 + docs/models/conversationhistory.md: + id: 7e97e8e6d6e9 + last_write_checksum: sha1:719a7c0722f3ad2e9f428dd31abf7bd0bad197d2 + pristine_git_object: daefe3363fb57d9a7d2737d3ea3d6e6f61021d49 + docs/models/conversationinputs.md: + id: 23e3160b457d + last_write_checksum: sha1:0c6abaa34575ee0eb22f12606de3eab7f4b7fbaf + pristine_git_object: 86db40ea1390e84c10a31155b3cde9066eac23b0 + docs/models/conversationmessages.md: + id: 46684ffdf874 + last_write_checksum: sha1:5b10a9f3f19591a2675979c21dd8383d5249d728 + pristine_git_object: 8fa51571697ee375bfbc708de854bc0b1129eec7 + docs/models/conversationrequest.md: + id: dd7f4d6807f2 + last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c + pristine_git_object: bd7823a88a07d4bc8fe1da82e51f843e70480ee1 + docs/models/conversationrequestagentversion.md: + id: 68aad87b1459 + last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33 + pristine_git_object: 9f2518211256762d03dec12c4c4464d48f7ed52c + 
docs/models/conversationrequesthandoffexecution.md: + id: 9733b1e121d1 + last_write_checksum: sha1:f7df210a46acf24abb1312123aebe9e595a190e8 + pristine_git_object: e7314f7e0080ff3f1a80afdbb229c78df5b008bb + docs/models/conversationrequesttool.md: + id: bd1bb6fcea8b + last_write_checksum: sha1:69d503d73f5bd044882d13cd0c7de188dd5f4831 + pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e + docs/models/conversationresponse.md: + id: 2eccf42d48af + last_write_checksum: sha1:8a86a4d0df6d13b121d5e41a8ee45555b69bf927 + pristine_git_object: 2732f785cdd706274ec5ff383f25fc201e6d0f78 + docs/models/conversationrestartrequest.md: + id: 558e9daa00bd + last_write_checksum: sha1:434e6c94b5d6c37b9026d536308cd1d3ff56e8d6 + pristine_git_object: ad3ff3624f533e4d4f751264d9bc6dd1849b3b69 + docs/models/conversationrestartrequestagentversion.md: + id: e6ea289c6b23 + last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f + pristine_git_object: 019ba301411729ec2c8078404adae998b3b9dacd + docs/models/conversationrestartrequesthandoffexecution.md: + id: faee86c7832c + last_write_checksum: sha1:44728be55e96193e6f433e2f46f8f749f1671097 + pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e + docs/models/conversationrestartstreamrequest.md: + id: 01b92ab1b56d + last_write_checksum: sha1:e9755598b5be197a938f1f74aa77ac24ccac8457 + pristine_git_object: 865a1e8f666d7f6878c40eb70fe5ab1c63da3066 + docs/models/conversationrestartstreamrequestagentversion.md: + id: 395265f34ff6 + last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 + pristine_git_object: 9e0063003f1d8acce61cf4edda91ddbc23a3c69d + docs/models/conversationrestartstreamrequesthandoffexecution.md: + id: 3e9c4a9ab94d + last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef + pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd + docs/models/conversationstreamrequest.md: + id: 833f266c4f96 + last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a + pristine_git_object: 
8b74f9e7cdea83a5622df2c3b79debe3c4427288 + docs/models/conversationstreamrequestagentversion.md: + id: e99ccc842929 + last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6 + pristine_git_object: 52ee96720abbb3fec822d0792dbde7020f9fb189 + docs/models/conversationstreamrequesthandoffexecution.md: + id: e6701e5f9f0c + last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 + pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85 + docs/models/conversationstreamrequesttool.md: + id: 71df6212ff44 + last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc + pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba + docs/models/conversationthinkchunk.md: + id: b9a8324da8f1 + last_write_checksum: sha1:80aed188198434ceca134e7aa7351ddba82c92c9 + pristine_git_object: 1fb16bd99f2b6277f87cd40d5c1eca389819d725 + docs/models/conversationthinkchunkthinking.md: + id: 477db2d543bd + last_write_checksum: sha1:d9f8c37fe933a3e52e2adb3ffe283d79c187cd36 + pristine_git_object: 84b800188b248166aac0043994fa27d4d79aad9d + docs/models/conversationusageinfo.md: + id: 57ef89d3ab83 + last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 + pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 + docs/models/createagentrequest.md: + id: 9484bab389c1 + last_write_checksum: sha1:b3228a622081b6f4b2a8bdaa60ca16049517d819 + pristine_git_object: cca3a079c532d3426f65a15bb0affdd34fd1d3ac + docs/models/createagentrequesttool.md: + id: 72e5f99878c5 + last_write_checksum: sha1:a90ad01c15da321f0c8ec700ba359a5371c5dcbb + pristine_git_object: c6ed3e98566eb684932fae9d2648a85c84443493 + docs/models/createbatchjobrequest.md: + id: e79afe8f495c + last_write_checksum: sha1:6cedce49f3108b9d5bc80e6d11712c594f2d9e50 + pristine_git_object: d094e2d518b31ada68c282241af3aa1483e98ff6 + docs/models/createfileresponse.md: + id: ea1396cebae8 + last_write_checksum: sha1:7b26d0a466004aca5cefaeb29f84dafc405c51ff + pristine_git_object: 
8152922b0d4ce199e269df955e5a25d4acf71e28 + docs/models/createfinetuningjobrequest.md: + id: 36824ba035ff + last_write_checksum: sha1:78f019530e9f5deace91c454c91ec6c4d0d23a20 + pristine_git_object: a93e323d5dd474c6d287e1203e85b79d11d762f0 + docs/models/createfinetuningjobrequestintegration.md: + id: e41b5575b494 + last_write_checksum: sha1:06dab95269f4a571a4c62a7f956fbf0250a0e8b3 + pristine_git_object: 0054a4a683a88fe67f92c1659bcb8c792ca8d286 + docs/models/createfinetuningjobrequestrepository.md: + id: e113eb1929b5 + last_write_checksum: sha1:6bd504d3ecb219f3245a83d306c1792133b96769 + pristine_git_object: 32be1b6dc3fcf7f6ee1a1d71abee4c81493655c2 + docs/models/createlibraryrequest.md: + id: 8935b2ed9d13 + last_write_checksum: sha1:c00abfe1abb0f0323e434b084dafa0d451eb3e51 + pristine_git_object: 71562806dbec6444dcdd0a19852a31ca00b6229a + docs/models/deletefileresponse.md: + id: ab3aa44589a0 + last_write_checksum: sha1:47ebc2474e4725e9ecb0f0d5940c604d9a82a4df + pristine_git_object: 188e2504606b051674352339c6aa999116a43b61 + docs/models/deletemodelout.md: + id: 5643e76768d5 + last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 + pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd + docs/models/deletemodelv1modelsmodeliddeleterequest.md: + id: c838cee0f093 + last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 + pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 + docs/models/document.md: + id: cd1d2a444370 + last_write_checksum: sha1:77076e66dea6f4582e73ecc5a55ef750f026448a + pristine_git_object: 284babb98fbb0279bef2626fa18eada0035572c5 + 
docs/models/documentlibrarytool.md: + id: 68083b0ef8f3 + last_write_checksum: sha1:76b9f47c399915a338abe929cb10c1b37282eadf + pristine_git_object: 95c3fa52ee3ff29e72bc0240a98c0afaa0cd5f62 + docs/models/documenttextcontent.md: + id: 29587399f346 + last_write_checksum: sha1:93382da0228027a02501abbcf681f247814d3d68 + pristine_git_object: 989f49e9bcb29f4127cb11df683c76993f14eba8 + docs/models/documentunion.md: + id: c65f9e42375c + last_write_checksum: sha1:249043e03067f79b27dc6eac410fb937920e8cdb + pristine_git_object: e573bd4632493ca648ad61307c70148366625d4b + docs/models/documentupload.md: + id: 7ff809a25eb0 + last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9 + pristine_git_object: 4e58a475f1776431c9c27a0fcdd00dd96257801f + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 + docs/models/embeddingdtype.md: + id: 22786e732e28 + last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 + pristine_git_object: 01656b0a85aa87f19909b18100bb6981f89683fc + docs/models/embeddingrequest.md: + id: bebee24421b4 + last_write_checksum: sha1:8e2bfa35f55b55f83fa2ebf7bee28cd00cb681d1 + pristine_git_object: 7269c0551a0c1040693eafdd99e1b8ebe98478a5 + docs/models/embeddingrequestinputs.md: + id: 6a35f3b1910a + last_write_checksum: sha1:e12ca056fac504e5af06a304d09154d3ecd17919 + pristine_git_object: 527a089b38b5cd316173ced4dc74a1429c8e4406 + docs/models/embeddingresponse.md: + id: 31cd0f6b7bb5 + last_write_checksum: sha1:1d7351c68b075aba8e91e53d29bdab3c6dd5c3a2 + pristine_git_object: 2bd85b4d245978ec396da067060cfe892f19c64f + docs/models/embeddingresponsedata.md: + id: 89b078acdc42 + last_write_checksum: sha1:e3e9200948f864382e0ecd3e04240b13d013141a + pristine_git_object: 20b50618ac99c63f7cf57fe4377840bfc1f85823 + docs/models/encodingformat.md: + id: 066e154e4d43 + last_write_checksum: sha1:8d6c4b29dea5ff7b0ae2b586951308fad99c60eb 
+ pristine_git_object: 7d5941cfe6cea2e85b20d6fb0031e9b807bac471 + docs/models/entitytype.md: + id: 130a2f7038b0 + last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983 + pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954 + docs/models/entry.md: + id: da9a99ab48ab + last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933 + pristine_git_object: d934b6774b25713afe923154d7709755426ec2cf + docs/models/event.md: + id: 311c22a8574a + last_write_checksum: sha1:627793d6aed5e378e3f2eeb4087808eb50e948d5 + pristine_git_object: 3eebffca874b8614a5be3d75be3cb7b0e52c2339 + docs/models/file.md: + id: 4ad31355bd1c + last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b + pristine_git_object: 37cc418f9e5189c18f312c42060fd702e2963765 + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/filepurpose.md: + id: ed6216584490 + last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a + pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b + docs/models/filesapiroutesdeletefilerequest.md: + id: 7fdf9a97320b + last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107 + pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c + docs/models/filesapiroutesdownloadfilerequest.md: + id: b9c13bb26345 + last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421 + pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88 + docs/models/filesapiroutesgetsignedurlrequest.md: + id: 08f3772db370 + last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f + pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 + docs/models/filesapirouteslistfilesrequest.md: + id: 04bdf7c654bd + last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 + pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 + docs/models/filesapiroutesretrievefilerequest.md: + 
id: 2783bfd9c4b9 + last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab + pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b + docs/models/fileschema.md: + id: 9a05a660399d + last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 + pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/finetuneablemodeltype.md: + id: e16926b57814 + last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 + pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd + docs/models/finetunedmodelcapabilities.md: + id: 3a6a0498ccf7 + last_write_checksum: sha1:82fc7d3f4e0b591b757f202699bb645bc61c69ff + pristine_git_object: d3203a2adccb7eb89c58395952c3e5a123a5b31b + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/ftclassifierlossfunction.md: + id: b546cfde5aa6 + last_write_checksum: 
sha1:752d9d238a90a3ef55205576fa38cee56ea1539e + pristine_git_object: 919cdd384315c99d4b590bc562298403733344ce + docs/models/ftmodelcard.md: + id: 15ed6f94deea + last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941 + pristine_git_object: 409f0526316a621b30dfbe45126c6b232e01fad4 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functioncallentry.md: + id: 016986b7d6b0 + last_write_checksum: sha1:373eb3a2d72596fcbb8933b28426896d5ac6b6f4 + pristine_git_object: 2843db9d36d8b82a15ebfce0833c8b0832609b4a + docs/models/functioncallentryarguments.md: + id: c4c609e52680 + last_write_checksum: sha1:ae88aa697e33d60f351a30052aa3d6e2a8a3e188 + pristine_git_object: f1f6e39e724673556a57059a4dbda24f31a4d4b9 + docs/models/functioncallentryconfirmationstatus.md: + id: 18f36160d744 + last_write_checksum: sha1:cc3ea4e03d26a1b22f94d42a87bd5ae63535d266 + pristine_git_object: 8948beb6d9ac647ada655960284dfc7f6d1f5ca1 + docs/models/functioncallevent.md: + id: cc9f2e603464 + last_write_checksum: sha1:58c6ee00af0c63614fd7506345977f9f2d8838ec + pristine_git_object: 0e3a36d6045a69e96c40836cdb586424225775af + docs/models/functioncalleventconfirmationstatus.md: + id: a33cc7957407 + last_write_checksum: sha1:36ac2d3442d83cbb1256e86f413134296bf8e90f + pristine_git_object: 4a3c8774d4eec4e1f5fea23a1827082e09f91669 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/functionresultentry.md: + id: 24d4cb18998c + last_write_checksum: sha1:1758992e30517b505b8d0622a54545dc9ae19163 + pristine_git_object: 
6a77abfd27e3e46de950646d7f89777dca11300e + docs/models/functiontool.md: + id: 5fb499088cdf + last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d + pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6 + docs/models/getfileresponse.md: + id: a983b3c8acd6 + last_write_checksum: sha1:5ca732ae5b384937473c04de6736fbab34deca24 + pristine_git_object: 0edd13e0818fc70c9c4db1e08b1490c1e146ea63 + docs/models/getsignedurlresponse.md: + id: 5539e5d7c3d4 + last_write_checksum: sha1:7198474f48bfba6d47326cd436e4a00a8ba70ce3 + pristine_git_object: bde693236406fe092f48c315e3b68a2fbbe6f9a4 + docs/models/githubrepository.md: + id: 66c120df624b + last_write_checksum: sha1:045e538dd7faffc1c6c6e6816563c5c8e776a276 + pristine_git_object: 827b6f34ae68ace7b8b4811764f59de2e0fcdd22 + docs/models/githubrepositoryin.md: + id: b42209ef8423 + last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 + pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db + docs/models/hyperparameters.md: + id: c167bad5b302 + last_write_checksum: sha1:e391cf72690e6cd01a2878081b8d87938e1c6639 + pristine_git_object: b6c00c3647d21789c92ad7d32dd29c3089ca134f + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 + docs/models/imagegenerationtool.md: + id: d5deb6b06d28 + last_write_checksum: sha1:a1813ef99e4a0990fd073bb2311c475e88072029 + pristine_git_object: b476b6f2733a49767d7f7a4ad091fc321ab514f4 + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: 
sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/inputentries.md: + id: a5c647d5ad90 + last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 + pristine_git_object: b44a467d258cfa8cc3d2a3236330471dbc3af109 + docs/models/inputs.md: + id: 4b0a7fb87af8 + last_write_checksum: sha1:c5f0c21c25fd5a698398a9e4ddf6261add60740c + pristine_git_object: d5771207d9725f04ca2ab1be692fc089360a58f4 + docs/models/instructrequest.md: + id: a0034d7349a2 + last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3 + pristine_git_object: 5f0cdfff135fb72d3b1a81999a30b720c044e3d4 + docs/models/instructrequestmessage.md: + id: 380503708a09 + last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023 + pristine_git_object: 57ed27ab3b1430514797dd0073bc87b31e5e3815 + docs/models/jobmetadata.md: + id: 1f8e4c2f49e5 + last_write_checksum: sha1:a29ec10cd129b955672f60aaf526905780afe6b6 + pristine_git_object: 5d8a89ddc6b401a80e23d51cb378cdac5d4eb342 + docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: + id: 798cb1ca1385 + last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 + pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 + docs/models/jobsapiroutesbatchgetbatchjobrequest.md: + id: e83a7ec84f8a + last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0 + pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2 + docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: + id: 5b9c44ad4d31 + last_write_checksum: sha1:1d7c05337b7cfe68f85a36576d060e1a890f9f96 + pristine_git_object: 5ceb0b2c40f079ffbe2cc4c82f6c3f94276980b4 + docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: + id: 8eb8c127091e + last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e + pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e + docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: + id: deff83b39b78 + last_write_checksum: 
sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 + pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 + docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: + id: c45757ba1ed9 + last_write_checksum: sha1:4931469b58d454264f1e8d32df6a07d3f6f01022 + pristine_git_object: fb62eb62027c8151d597544fcaf27b972aeb78b3 + docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: + id: 8aa8030f26d7 + last_write_checksum: sha1:4aada0d2297479d8276f5a422cb4dd6b56b1e176 + pristine_git_object: 7b52e2ca6365f17ac3b19d128937783d87c7fa37 + docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: + id: a9b75762e534 + last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba + pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 + docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: + id: c0b31f4fc621 + last_write_checksum: sha1:4ceb9df28082bf5d496cd222a0f45dc81a576367 + pristine_git_object: f770532776a13860e697da7478d1677d16f0ec36 + docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: + id: 52078f097503 + last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7 + pristine_git_object: 23c52c342358ea889b25ee7b18b381b68519c6cf + docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md: + id: 8545ffb587d6 + last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311 + pristine_git_object: 40d57686aec11d9bdc4c116ea4c98183e0a6414c + docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: + id: b4e2b814d8c3 + last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e + pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b + docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: + id: cfd848845787 + last_write_checksum: sha1:a165279fa0c9e051458ea4333dfdd31ef0440426 + pristine_git_object: 1a7e71d4479369f13c391a9782278557bc4531ae + docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: + id: 75b5dd1bcbaa + last_write_checksum: 
sha1:dd30e7ff8748d26497458f3398c0547113dc058f + pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: + id: 60bd2e28993a + last_write_checksum: sha1:58835c28cccaf90e99bbb72bf7c5a5ce42498824 + pristine_git_object: dbe49a86ca2bf64901133fd58a342d30909c35b2 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: + id: c265a30fd4cf + last_write_checksum: sha1:410c62a884aae902cdfbfcab33779e62487de13b + pristine_git_object: f40350bf9d74d09ca3a2ec6d91d9068bda631ef5 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/legacyjobmetadata.md: + id: 50ac14d9b270 + last_write_checksum: sha1:ebe37a176ca318e797fee7ebf4eef73fb9938a12 + pristine_git_object: 4705ab4f67e10b8e2cbfc86b29c03a9945aeb8fb + docs/models/librariesdeletev1request.md: + id: c0c3b2e1aabc + last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 + pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c + docs/models/librariesdocumentsdeletev1request.md: + id: 9d557bd7d1cc + last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 + pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 + docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: + id: 27ad38ce4cb1 + last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 + pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 + docs/models/librariesdocumentsgetsignedurlv1request.md: + id: 4498715b6cfb + last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 + pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f + docs/models/librariesdocumentsgetstatusv1request.md: + id: c2219d3a3738 + last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 + pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d + 
docs/models/librariesdocumentsgettextcontentv1request.md: + id: 850dfa465952 + last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d + pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac + docs/models/librariesdocumentsgetv1request.md: + id: cdd0df2f7e9d + last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef + pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058 + docs/models/librariesdocumentslistv1request.md: + id: 7b5756e50d64 + last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f + pristine_git_object: 44f6300115853053214639982516a60b3268e778 + docs/models/librariesdocumentsreprocessv1request.md: + id: 1b8bf57b3f0a + last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 + pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 + docs/models/librariesdocumentsupdatev1request.md: + id: b9147b1c0e38 + last_write_checksum: sha1:ed3ae7761990bd26a4bf99cd4641822eb90d3d57 + pristine_git_object: d46308509330099e30a53dddad51da8a6186aa92 + docs/models/librariesdocumentsuploadv1request.md: + id: 89a89d889c72 + last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776 + pristine_git_object: 172a6183f31eec3142a84637414484799c2a4677 + docs/models/librariesgetv1request.md: + id: f47ad71ec7ca + last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac + pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 + docs/models/librariessharecreatev1request.md: + id: 99e7bb8f7fed + last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 + pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + docs/models/librariessharedeletev1request.md: + id: bc8adba83f39 + last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf + pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd + docs/models/librariessharelistv1request.md: + id: 86e6f08565e2 + last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 + pristine_git_object: 
98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 + docs/models/librariesupdatev1request.md: + id: f7e51b528406 + last_write_checksum: sha1:6a33b0161702ecc335dd2859df1bbc05b73702a9 + pristine_git_object: c5c142db7aaa49990135c21eabde43b8c0fdf756 + docs/models/library.md: + id: e8ec114dd107 + last_write_checksum: sha1:a4d6e9a777ce3d63aac24432281933ce6e13b4a9 + pristine_git_object: 4319f43df922b4924a11d494002826cb8d6dea0b + docs/models/listbatchjobsresponse.md: + id: e03025d58630 + last_write_checksum: sha1:de42c9396546fc8487d0bd6ed15b4076599fa83f + pristine_git_object: c23e32201d12a2594f97a493f63b2b7b42b9e337 + docs/models/listdocumentsresponse.md: + id: f2091cee0405 + last_write_checksum: sha1:335d0ccd3a448e65739d5a0cfa2c67614daec031 + pristine_git_object: 47b9d3b73fdc85bf6e463c91790faf346df56664 + docs/models/listfilesresponse.md: + id: b15df90d2d59 + last_write_checksum: sha1:4840f26427acf8846a9f1e48136f0663c6e4cd87 + pristine_git_object: 802f685fb3a76afb86a69cf41e6de9339cd6fbc7 + docs/models/listfinetuningjobsresponse.md: + id: d04e4dfddf78 + last_write_checksum: sha1:cebaf361aa10f1f6c4299c3c8a34f32d301455ad + pristine_git_object: 00251242023e2161747ebf00b4c2959909e2b654 + docs/models/listfinetuningjobsresponsedata.md: + id: 59c80de4086d + last_write_checksum: sha1:5a0d91c251b4b9283895d9f19f7b9416f93d4468 + pristine_git_object: adb0644475841c6a4686e8c42790dd44eed43dc1 + docs/models/listlibrariesresponse.md: + id: 87e3bec10745 + last_write_checksum: sha1:00522e685ec71a54f5f272d66b82e650848eaf36 + pristine_git_object: e21b9ced628f6fd5ae891d4a46666ebc94546859 + docs/models/listsharingout.md: + id: a3249129f37e + last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f + pristine_git_object: bcac4834f3bd008868435189f40bbf9e368da0d2 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messageentries.md: + id: 9af3a27b862b + 
last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc + pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d + docs/models/messageinputcontentchunks.md: + id: 34aac9c271db + last_write_checksum: sha1:d8ffdfd8b5458497e2cb6a32f52900c3ca2a6ddf + pristine_git_object: 0561785082c741f39f930ab7ded5b6c6a9ade6ad + docs/models/messageinputentry.md: + id: eb74af2b9341 + last_write_checksum: sha1:c91bfdf9426c51236b6ff33d127dbe62b051a9da + pristine_git_object: f8514fb3305dbe1df91db8d622cc33a753b63623 + docs/models/messageinputentrycontent.md: + id: 7e12c6be6913 + last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a + pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e + docs/models/messageoutputcontentchunks.md: + id: 802048198dc0 + last_write_checksum: sha1:8cf4e4ea6b6988e22c117d8f689bbfb0869816ad + pristine_git_object: c4a7777e7675ebf2384311ec82b2713da69e5900 + docs/models/messageoutputentry.md: + id: f969119c8134 + last_write_checksum: sha1:f50b955cd622a6160c0ada34b0e14bff612802b7 + pristine_git_object: 73a1c666acc913b96d65a124612c4a728882bbc9 + docs/models/messageoutputentrycontent.md: + id: 44019e6e5698 + last_write_checksum: sha1:d0cc7a8ebe649614c8763aaadbf03624bb9e47e3 + pristine_git_object: 5206e4eb0d95e10b46c91f9f26ae00407d2dd337 + docs/models/messageoutputevent.md: + id: b690693fa806 + last_write_checksum: sha1:a4157c087ff95fa9445757c9d363615718156164 + pristine_git_object: e09a965f7d4cc35d6b120ba5555d96ba7b3e8a27 + docs/models/messageoutputeventcontent.md: + id: cecea075d823 + last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 + pristine_git_object: 16d8d52f6ff9f43798a94e96c5219314731ab5fb + docs/models/metric.md: + id: a812a3e37338 + last_write_checksum: sha1:14016848dcfaba90014b482634ed6d5715caa860 + pristine_git_object: 7f86303651650177ece51b82d867cab858e830ae + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + 
pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 + docs/models/modelcapabilities.md: + id: 283fbc5fa32f + last_write_checksum: sha1:8a221e2334193907f84cf241ebaf6b86512bbd8b + pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca + docs/models/modelconversation.md: + id: 497521ee9bd6 + last_write_checksum: sha1:22a8d7502eeaf176fbd1c7b22b512b4f9e4e043f + pristine_git_object: af2e5c6149339a561b03b954cd0e71f9d9aeffd6 + docs/models/modelconversationtool.md: + id: 2dd28167bc36 + last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f + pristine_git_object: 8723556753d077969bc665a423c057ae4ceaa0d2 + docs/models/modellist.md: + id: ce07fd9ce413 + last_write_checksum: sha1:b4c22b5eff4478ffa5717bd5af92ca79f4a90b01 + pristine_git_object: 85b20be7376f80cf169c25b3c7117079cd4c2828 + docs/models/modellistdata.md: + id: e2eb639c646f + last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74 + pristine_git_object: b44e84a00d0c54f8df78650d45de0a409c901048 + docs/models/moderationobject.md: + id: 4e84364835f5 + last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e + pristine_git_object: 320b2ab4935f8751eb58794e8eb9e422de35ae7c + docs/models/moderationresponse.md: + id: e15cf12e553b + last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372 + pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 + docs/models/multipartbodyparams.md: + id: f5be2d861921 + last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6 + pristine_git_object: f14b95737fde09a120b35e2f922568ca31825bd5 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + 
last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:2faa819df648d330074c177d8f5d4a9c9a27bc90 + pristine_git_object: dd3fc2ea28cc2bc147473ba9f73aa32a9528632a + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/orderby.md: + id: 9e749ed80f72 + last_write_checksum: sha1:6ec002e3e59f37002ccb14e347b790ca4daef773 + pristine_git_object: bba50df10855a8d6acdf4b061ec2ffeb0279fd7f + docs/models/output.md: + id: 376633b966cd + last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b + pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876 + docs/models/outputcontentchunks.md: + id: f7e175c8e002 + last_write_checksum: sha1:5adb0733a8ca9b224155dfef66dfb37b7f416972 + pristine_git_object: e5185014faa41b6e6d1567d713fc390f551fad01 + docs/models/paginationinfo.md: + id: 3d2b61cbbf88 + last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 + pristine_git_object: ad1fbb86c714c152a5e6e99d8a741e7346884e55 + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/processingstatusout.md: + id: 83c8c59c1802 + last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c + pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 + 
docs/models/realtimetranscriptionerror.md: + id: 4bc5e819565b + last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 + pristine_git_object: e01f2126b3084eade47a26ea092556f7f61142c9 + docs/models/realtimetranscriptionerrordetail.md: + id: ea137b1051f1 + last_write_checksum: sha1:7e1d18760939d6087cda5fba54553141f8a78d1e + pristine_git_object: 5b34755dc67359bb884d5c2387608686ee527470 + docs/models/realtimetranscriptionerrordetailmessage.md: + id: d25137243bef + last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f + pristine_git_object: da3764ef56337bdc773eaf8e9aa747cbd1b407e2 + docs/models/realtimetranscriptioninputaudioappend.md: + id: fa2aa317d1ca + last_write_checksum: sha1:59cce0828505fdb55104cd3144b75334e0f31050 + pristine_git_object: 5ee365eb9a993933509ac4666bcec24bfcc6fccd + docs/models/realtimetranscriptioninputaudioend.md: + id: 11045f9cc039 + last_write_checksum: sha1:945ca0475826294e13aba409f3ae2c2fc49b1b67 + pristine_git_object: 393d208c6e242959161f4436d53cf4aa2df69a92 + docs/models/realtimetranscriptioninputaudioflush.md: + id: c2f2258e0746 + last_write_checksum: sha1:a4e6d160da44c6f57b01059f7198208702e9b06a + pristine_git_object: 367725baa278935a6a282338ca7f2a23895a86d8 + docs/models/realtimetranscriptionsession.md: + id: aeb0a0f87d6f + last_write_checksum: sha1:d72bf67442ac5e99f194c429e96a504685f02efb + pristine_git_object: 750bd7f79b65666812c6207d7085b9437c49517d + docs/models/realtimetranscriptionsessioncreated.md: + id: aa2ae26192d6 + last_write_checksum: sha1:d13fec916d05300c86b52e951e81b1ceee230634 + pristine_git_object: 34e603fd0a1cbc8007eef06decb158213faebeed + docs/models/realtimetranscriptionsessionupdated.md: + id: 56ce3ae7e208 + last_write_checksum: sha1:833db566b2c8a6839b43cb4e760f2af53a2d7f57 + pristine_git_object: 7e2719957aae390ee18b699e61fbc7581242942f + docs/models/realtimetranscriptionsessionupdatemessage.md: + id: 02a5eee40cdd + last_write_checksum: sha1:44f8e6bc8f8cd4087a7e86c85db5141fab90f78d + 
pristine_git_object: 2a50ca92720bad6605bdeafd83b43d0e8bf40615 + docs/models/realtimetranscriptionsessionupdatepayload.md: + id: 3ddd5a95510a + last_write_checksum: sha1:33bca4d547ca812d55ac49bf7b17851b2fecfc80 + pristine_git_object: d6c6547d7895e53be15a0cce46b6524178acc3bc + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab + docs/models/requestsource.md: + id: 8857ab6025c4 + last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf + pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b + docs/models/response.md: + id: 583c991c7a30 + last_write_checksum: sha1:0791cb4aa4045708ab64d42bf67bd6ab74bc7752 + pristine_git_object: ff67925758959b87992b47a1a32c224eeeb599e3 + docs/models/responsedoneevent.md: + id: 38c38c3c065b + last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f + pristine_git_object: 63d4cc06493e1ca12cf0e8ef800acfc0bdc9a02d + docs/models/responseerrorevent.md: + id: 3e868aa9958d + last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c + pristine_git_object: 4309bdadc323918900cc4ca4fddb18788361d648 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/responseretrievemodelv1modelsmodelidget.md: + id: 6143ec73bdd6 + last_write_checksum: sha1:6bae62cbb18559065a53f0acdacb1f72f513467e + pristine_git_object: ffbc1473d39c8266bb6b05b37677c98ca1d10858 + docs/models/responsestartedevent.md: + id: 88e3b9f0aa8d + last_write_checksum: sha1:156f38bbe8278f9c03117135938e7cbdae3038b9 + pristine_git_object: e2f421af866690b34c2d9fa4595a63e9172a65f5 + 
docs/models/responsev1conversationsget.md: + id: 48d4a45780a9 + last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e + pristine_git_object: 844c5d610a9a351532d12b1a73f6c660059da76b + docs/models/retrievemodelv1modelsmodelidgetrequest.md: + id: ac567924689c + last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 + pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:c7ef39a81299f3156b701420ef634a8b4fab76f0 + pristine_git_object: 853c6257d9bdb4eda9cb37e677d35ab477dca812 + docs/models/sampletype.md: + id: 0e09775cd9d3 + last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a + pristine_git_object: 34a6a012b1daeeb22626417650269e9376cc9170 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:45b7b8881a6560a468153662d61b99605a492edf + pristine_git_object: 2e0839d06f821dd97780dc22f202dedf23e4efe1 + docs/models/shareenum.md: + id: 53a713500576 + last_write_checksum: sha1:9d45d4bd272e6c146c3a8a21fd759acf2ae22148 + pristine_git_object: dc5d2b68a810c2983b5a47fbff747dfc2cc17598 + docs/models/sharingdelete.md: + id: 165cac179416 + last_write_checksum: sha1:1a0b3c95f4b56173510e234d7a76df85c593f360 + pristine_git_object: 1dcec0950c7fcd264ea9369c24244b54ba2bcfbf + docs/models/sharingin.md: + id: 08d396ee70ad + last_write_checksum: sha1:662edfc07a007e94fe1e54a07cf89d7c83c08df5 + pristine_git_object: bac18c8d43f801e8b5cf5b3cd089f9da0ee2281a + docs/models/sharingout.md: + id: 5db4547c7c56 + last_write_checksum: sha1:bd15c318d1a3f5bee7d7104d34cbd8ba6233bbb8 + pristine_git_object: 35aeff43593f3c9067c22a2f8b1468d7faa5af34 + docs/models/source.md: + id: 6541ef7b41e7 + last_write_checksum: sha1:d0015be42fe759d818ebd75b0cec9f83535a3b89 + pristine_git_object: bb1ed6124647b02c4350123bf257b0bf17fc38fd + docs/models/ssetypes.md: + id: 6a902241137c + last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 + pristine_git_object: 
ae06b5e870d31b10f17224c99af1628a7252bbc3 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:d9fe94c670c5e0578212752c11a0c405a9da8518 + pristine_git_object: df0e61c32bc93ef17dbba50d026edace139fee6a + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:0f861f1653035dea2018be9a977c15f54add9531 + pristine_git_object: 70c0369f16465e1b1f5f46e8cd799e5db536cdde + docs/models/thinkchunkthinking.md: + id: 22de7b5060fb + last_write_checksum: sha1:5e0722b8d513b38d60fbfe28635bdea40b951593 + pristine_git_object: dd1ecca12b5cda76a51b1e13335f1757a9dd7a68 + docs/models/timestampgranularity.md: + id: eb4d5a8e6f08 + last_write_checksum: sha1:e256a5e8c6010d500841295b89d88d0eface3b88 + pristine_git_object: d20012ea9925446c16c9162304642ba48391d34d + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolcallconfirmation.md: + id: 944eebb142ff + last_write_checksum: 
sha1:864ccb39a00094d965b764235e74709945abca3d + pristine_git_object: 1812f7d687d83f5692d9e79709e56813ab2c79b1 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolconfiguration.md: + id: 06bfa2c4e662 + last_write_checksum: sha1:9b619977375f228c76f09d48d6e2833add6c07e2 + pristine_git_object: 89286a172124ce3473bcb081de6e4db8c95afefa + docs/models/toolexecutiondeltaevent.md: + id: f2fc876ef7c6 + last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e + pristine_git_object: 7066f3485407707500e5006335279bfa37db8705 + docs/models/toolexecutiondeltaeventname.md: + id: 93fd3a3b669d + last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc + pristine_git_object: 9c3edef8c0698d7293a71ee56410a0ed67fd1924 + docs/models/toolexecutiondoneevent.md: + id: b604a4ca5876 + last_write_checksum: sha1:6b6975ded0b0495b6c56250d153186c7818b5958 + pristine_git_object: b2d81be3cfa3e1dd0d1a58ef5ad16556c5e953c7 + docs/models/toolexecutiondoneeventname.md: + id: d19dc0060655 + last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6 + pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 + docs/models/toolexecutionentry.md: + id: 75a7560ab96e + last_write_checksum: sha1:668d8fbc59bc729bf4b1d95d2f2bfe4097701c0e + pristine_git_object: 03316381b130cf02751b10fef4129c8f23072b76 + docs/models/toolexecutionentryname.md: + id: 86d537762559 + last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845 + pristine_git_object: fb762a5382d8b0e93dc2eb277f18adf810057c55 + docs/models/toolexecutionstartedevent.md: + id: 37657383654d + last_write_checksum: sha1:5a020d24bdeb4eb9976ce93a8daa91947026bde9 + pristine_git_object: 
189b8a3d3b22d73000850a3f1a95b85e358c2090 + docs/models/toolexecutionstartedeventname.md: + id: be6b33417678 + last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 + pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a + docs/models/toolfilechunk.md: + id: 67347e2bef90 + last_write_checksum: sha1:2e4c6ce703733c02e62467507c231033716fdb92 + pristine_git_object: d60021755729f1a2870e24a500b3220c8f1fc6e3 + docs/models/toolfilechunktool.md: + id: eafe1cfd7437 + last_write_checksum: sha1:73a31dbff0851612f1e03d8fac3dbbee77af2df0 + pristine_git_object: aa5ac8a99a33d8c511f3d08de93e693bf75fb2a1 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolreferencechunk.md: + id: 10414b39b7b3 + last_write_checksum: sha1:ea3bdfc83177c6b7183ad51fddb2d15aee0f0729 + pristine_git_object: 49ea4ca7b05e5fcaaf914f781e3a28483199d82d + docs/models/toolreferencechunktool.md: + id: c2210d74792a + last_write_checksum: sha1:368add3ac6df876bc85bb4968de840ac578ae623 + pristine_git_object: 999f7c34885015a687c4213d067b144f1585c946 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/trainingfile.md: + id: 4039958e8930 + last_write_checksum: sha1:d02543c2d1446e56501f2ac358a09669b0077648 + pristine_git_object: cde218bb2281a1274d013844ad76b4b2a34b986c + docs/models/transcriptionresponse.md: + id: 39e2354aca38 + last_write_checksum: sha1:7b32e2179c3efc675c05bba322cc33554a9ff9db + pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 + docs/models/transcriptionsegmentchunk.md: + id: f09db8b2273e + 
last_write_checksum: sha1:d4a7ebd6a8cc512a0bd00a49af4130c533254b44 + pristine_git_object: d7672c0eebb55243965306c94a771aa18ed641d6 + docs/models/transcriptionstreamdone.md: + id: 2253923d93cf + last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4 + pristine_git_object: bca69a2b02e069ce240342d76ac408aec67993a9 + docs/models/transcriptionstreamevents.md: + id: d0f4eedfa2b6 + last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b + pristine_git_object: f760385dfbd9779e63d61ec6357901bc9b4ca8e9 + docs/models/transcriptionstreameventsdata.md: + id: 506af75a0708 + last_write_checksum: sha1:99fcb3bf3aab0fb87dc02a4e6ccef9271ff0ae89 + pristine_git_object: eea8e9281634c56517e28f613afee38e0b0846ad + docs/models/transcriptionstreameventtypes.md: + id: 701782e8a63d + last_write_checksum: sha1:ff79dfb5d942c807b03c9e329a254bfa95b99a16 + pristine_git_object: e4eb25a6400dcc5a48b5eb5f65e96f7be91fa761 + docs/models/transcriptionstreamlanguage.md: + id: 5e9df200153c + last_write_checksum: sha1:d5626a53dde8d6736bab75f35cee4d6666a6b795 + pristine_git_object: 63fcfbc63a65cdff4228601e8a46f9d003ec9210 + docs/models/transcriptionstreamsegmentdelta.md: + id: f59c3fb696f2 + last_write_checksum: sha1:7d6999abf5a01fc01c0d5302acd3218e535adc9a + pristine_git_object: 1b652a3b6dc4406a3b7efa8a412b15ca0a5d765f + docs/models/transcriptionstreamtextdelta.md: + id: 69a13554b554 + last_write_checksum: sha1:d969f462034ed356f2c8713b601ee7d873d4ce07 + pristine_git_object: 77bd0ddcf8a1d95707fa9e041de3a47bb9e7f56d + docs/models/unarchivemodelresponse.md: + id: a690f43df567 + last_write_checksum: sha1:5c9d4b78c92d30bb4835cb724d1ea22a19bf5327 + pristine_git_object: 375962a7110f814288ea9f72323383bd8194e843 + docs/models/updateagentrequest.md: + id: 371bfedd9f89 + last_write_checksum: sha1:97170995ed40391023f0dce5096cfebe83fa7dc8 + pristine_git_object: d3428d92a8f23670a6b587a6017a353d2c12a815 + docs/models/updateagentrequesttool.md: + id: bdf961d2c886 + last_write_checksum: 
sha1:5355f8c97b2aef98aebff251e1f4830ddbaa7881 + pristine_git_object: e358b1edb9035667104700dde890bb0b43074543 + docs/models/updatedocumentrequest.md: + id: ee4e094a6aa7 + last_write_checksum: sha1:4c4d774c67449402eb7e1476b9d0fef5b63f2b99 + pristine_git_object: 7e0b41b7be9f559b27a3430f46ed53d0453f6e03 + docs/models/updatelibraryrequest.md: + id: 2eda82f12f31 + last_write_checksum: sha1:436e08988daa8ca04ece36a4790ed84e0629b81a + pristine_git_object: aaffc5a9f0d588ff935db2ec2c079af9f162c2c3 + docs/models/updatemodelrequest.md: + id: 8eabdced3e0e + last_write_checksum: sha1:96879df11c005b591f2e59975897feff8fc8656e + pristine_git_object: 56b84c59c48ac135345394235c71ce77d384e33e + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + docs/models/wandbintegration.md: + id: ba1f7fe1b1a3 + last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf + pristine_git_object: c73952d9e79ea8e08bc1c17817e74e3650def956 + docs/models/wandbintegrationresult.md: + id: 729c2601b338 + last_write_checksum: sha1:49f442907815de4661a85a3521803d80b953a17e + pristine_git_object: d12bc31191ba534a9744d78f657c19e7f93f777a + 
docs/models/websearchpremiumtool.md: + id: 267988aa8c3f + last_write_checksum: sha1:38f80a43f73a13ddedc7730f853c092a48b665f9 + pristine_git_object: 78b736cd314617caa0d77f3c42015212e37ab539 + docs/models/websearchtool.md: + id: fc4df52fb9b5 + last_write_checksum: sha1:72636dc7ae74264bb5158d284ef6f83da5290b27 + pristine_git_object: 4ca7333c412ad819e3e02c61debe402e3f9b0af9 + docs/sdks/accesses/README.md: + id: 2ea167c2eff2 + last_write_checksum: sha1:279d3b3a4f625b89b25e9a2a47886ac6008b3ca0 + pristine_git_object: c50456df9ea2bb71f78a83ad28f90e089d2e2cd7 + docs/sdks/agents/README.md: + id: 5965d8232fd8 + last_write_checksum: sha1:a73ae6719acef32b47be55ea5c5684e91f7eda68 + pristine_git_object: 8a60837030b9e5dd0adca0d07d9f0266158b080f + docs/sdks/batchjobs/README.md: + id: a3b8043c6336 + last_write_checksum: sha1:b4b3123ff210545048e2b0c729f2b7e5f7460f4e + pristine_git_object: 3633fe4ee136c1ac90f9446425f62a0d68fa4f90 + docs/sdks/betaagents/README.md: + id: 5df79b1612d8 + last_write_checksum: sha1:9ec1c7a967bc653fe175a7986ddec74d5feb0714 + pristine_git_object: aaa5110e6db30f5450877b67d70d46e53b98996b + docs/sdks/chat/README.md: + id: 393193527c2c + last_write_checksum: sha1:5e7a43def5636140d70a7c781ed417e527ce9819 + pristine_git_object: 1bf4aeadc762f5d696c278eefaa759f35993e9d5 + docs/sdks/classifiers/README.md: + id: 74eb09b8d620 + last_write_checksum: sha1:9f11740f8cf1a3af44fff15b63916305f1882505 + pristine_git_object: dc0f4984380b5b137266421e87a1505af5260e89 + docs/sdks/conversations/README.md: + id: e22a9d2c5424 + last_write_checksum: sha1:4c5f8ea93d560956cb23c26e0d5f6d7cbc129e07 + pristine_git_object: e77d329b735dc21f620470bcf82220a79bc34e18 + docs/sdks/documents/README.md: + id: 9758e88a0a9d + last_write_checksum: sha1:ac7ab2598066971e8b371a3e73aa266ec697df1b + pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 + docs/sdks/embeddings/README.md: + id: 15b5b04486c1 + last_write_checksum: sha1:76cb4876eebccfd2ab9a10a1b25570477a96a5c1 + pristine_git_object: 
eecb5c9e991dcd2fd5c1f0688efe3b64b4c6de3b + docs/sdks/files/README.md: + id: e576d7a117f0 + last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c + pristine_git_object: 9507326be83eaf750daa12c0b1421d819b72340d + docs/sdks/fim/README.md: + id: 499b227bf6ca + last_write_checksum: sha1:5b2ce811df8d867d14fe0126f2c9619cca779f56 + pristine_git_object: 49151bf5be49ce6554679bc5c30906894a290ecb + docs/sdks/finetuningjobs/README.md: + id: 03d609f6ebdd + last_write_checksum: sha1:2d7ff255c1462d5f1dff617a1993e730ec3911ea + pristine_git_object: 4262b3a9833180ce86da43a26ee7ab27403f2cd0 + docs/sdks/libraries/README.md: + id: df9a982905a3 + last_write_checksum: sha1:e3eb0e9efb3f758fdf830aa1752c942d59a4f72b + pristine_git_object: 7df1ef4e26449af572412f052ee7ad189039544f + docs/sdks/models/README.md: + id: b35bdf4bc7ed + last_write_checksum: sha1:2aa91ffe637c049aed0d63d24ac39688b6ecb270 + pristine_git_object: 311a2db6e213902ac5a2c27acf19f856dae2c264 + docs/sdks/ocr/README.md: + id: 545e35d2613e + last_write_checksum: sha1:da377d75b6b7480c335d7f721bb06fe11492be38 + pristine_git_object: fde2a82339e10c74aca6d1b4168b62501d7bbf83 + docs/sdks/transcriptions/README.md: + id: 089cf94ecf47 + last_write_checksum: sha1:15d118796f147bc5b0bf4146ba39bfa9edfbc996 + pristine_git_object: 97703c9b4dc942385ee04ae96cbd100c3f632a17 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/client/__init__.py: + id: f1b791f9d2a5 + last_write_checksum: sha1:c05dc9845d3361c4aae7796b079ac0e7952e8606 + pristine_git_object: 4b79610a3fc8222fc8f9adeeaf798e894708fc06 + src/mistralai/client/_hooks/__init__.py: + id: cef9ff97efd7 + last_write_checksum: sha1:9a6f060871150610f890cc97676c3afe9050b523 + 
pristine_git_object: 66a04e3727ffcc2c427d854cdbb4f5f340af050f + src/mistralai/client/_hooks/sdkhooks.py: + id: ed1e485b2153 + last_write_checksum: sha1:e592d5ab277827b988257b4df3e746508ca91b23 + pristine_git_object: ecf94240a5689c8b248add46509bc7a7982d8437 + src/mistralai/client/_hooks/types.py: + id: 85cfedfb7582 + last_write_checksum: sha1:40294e852f818a974034c33e510e0f8723fcaf31 + pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c + src/mistralai/client/_version.py: + id: cc807b30de19 + last_write_checksum: sha1:f5109c91723cc927e8513ac9e637512edd91f04e + pristine_git_object: ab2cf01d06f4d4373b52373795db76aa40f00ceb + src/mistralai/client/accesses.py: + id: 76fc53bfcf59 + last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e + pristine_git_object: 0761b0bc6080ab0d891be70089a1908d435559fa + src/mistralai/client/agents.py: + id: e946546e3eaa + last_write_checksum: sha1:7049cab7c308888c88b0341fb29f0132e154e3cb + pristine_git_object: 2b70d1520663d999773159d89b1f9dc96f7fbf97 + src/mistralai/client/audio.py: + id: 7a8ed2e90d61 + last_write_checksum: sha1:e202d775d24c0303053e0548af83fcb04e2748f4 + pristine_git_object: f68f063c08a099d07904456daa76d8e2d2ecdbe6 + src/mistralai/client/basesdk.py: + id: 7518c67b81ea + last_write_checksum: sha1:2cea76931db51175b2c787d0c707f08e9944c22f + pristine_git_object: a976121bd224d64497e5006cb58dd728f6a67144 + src/mistralai/client/batch.py: + id: cffe114c7ac7 + last_write_checksum: sha1:b452983f67b33f26e1faa60fdbbb171cb1877224 + pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba + src/mistralai/client/batch_jobs.py: + id: 3423fec25840 + last_write_checksum: sha1:34de0e986e7c0e4377f70125d319e522280c565f + pristine_git_object: 0e135b30cd122d1a813ee67bf2f9037953448e73 + src/mistralai/client/beta.py: + id: 981417f45147 + last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617 + pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952 + src/mistralai/client/beta_agents.py: + id: b64ad29b7174 
+ last_write_checksum: sha1:7c900a6b1483108a367050440667c069b08fbb92 + pristine_git_object: 157c5de4c66273e6df468f8a12b4399f9efb32fb + src/mistralai/client/chat.py: + id: 7eba0f088d47 + last_write_checksum: sha1:520b0da011d63c60bd0d3a960a410a8f4a6a3e22 + pristine_git_object: 13b9c01f035c4fd6f60b78f20a1801bedf3b582b + src/mistralai/client/classifiers.py: + id: 26e773725732 + last_write_checksum: sha1:ee94a4e50cda893f9c19c2304adda8b23fc2de9e + pristine_git_object: 67199b601e38dff6fc6a4317eb845fbde6c25de0 + src/mistralai/client/conversations.py: + id: 40692a878064 + last_write_checksum: sha1:1101b9e374010ba9cb080c30789672cfcfc45c55 + pristine_git_object: ec33b1fb12d1923ef5f686ed09c5fe5ae889e40c + src/mistralai/client/documents.py: + id: bcc17286c31c + last_write_checksum: sha1:37669f51eba1b352a5e3c7f3a17d79c27c7ea772 + pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 + src/mistralai/client/embeddings.py: + id: f9c17258207e + last_write_checksum: sha1:0fbf92b59fde3199c770a522ead030f8fa65ff5c + pristine_git_object: 5f9d3b9cb611943e509caeda9ddd175e3baee2c3 + src/mistralai/client/errors/__init__.py: + id: 0b2db51246df + last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 + pristine_git_object: 58a591a1cc2896f26df2075ffca378ca6c982d1e + src/mistralai/client/errors/httpvalidationerror.py: + id: ac3de4a52bb6 + last_write_checksum: sha1:73251adb99a07d11b56d0bc0399a2362ff9ccdba + pristine_git_object: 97b165629c39ab6e24406eb3f13970414b73f8f7 + src/mistralai/client/errors/mistralerror.py: + id: d1f57f0ff1e9 + last_write_checksum: sha1:30065cdd7003ec02cb3463d7c63229c4ff97503c + pristine_git_object: eb73040c5b5251018695204fde80eac914b35dae + src/mistralai/client/errors/no_response_error.py: + id: 8b469ecb0906 + last_write_checksum: sha1:0b3fdb1136472c41a4a739a5cbf9e2a4ce0c63a4 + pristine_git_object: d71dfa7b24146f1390ac6830e61acf337b99ca83 + src/mistralai/client/errors/responsevalidationerror.py: + id: 6cfaa3147abe + last_write_checksum: 
sha1:6862d178d4d1964bc03db47b76709aa406546981 + pristine_git_object: a7b3b9f0207846b5f176076b9f400e95cb08ebb9 + src/mistralai/client/errors/sdkerror.py: + id: c489ffe1e9ca + last_write_checksum: sha1:f708168e46c2960dd51896083aee75ccdb36f9dd + pristine_git_object: 25b87255a51021079f8ba5cc60b43509e12f9a4d + src/mistralai/client/files.py: + id: f12df4b2ce43 + last_write_checksum: sha1:a16c8702d15339200b09c62948c06f79e720d79c + pristine_git_object: a5f3adf6dd9b60a202c70edf7d2a148a626ce471 + src/mistralai/client/fim.py: + id: 217bea5d701d + last_write_checksum: sha1:dc427c9e954dfb9a7fe2df8b5c544877a28cdc73 + pristine_git_object: 8ffb7730a03398322dfdd6c83724096d4924c5c5 + src/mistralai/client/fine_tuning.py: + id: 5d5079bbd54e + last_write_checksum: sha1:fe1f774df4436cc9c2e54ed01a48db573eb813cd + pristine_git_object: df6bc5643a13294ddfbeecc6ae84d00cd7199bed + src/mistralai/client/fine_tuning_jobs.py: + id: fa1ea246e0b2 + last_write_checksum: sha1:8cbf3827f5c2e43170192de39be498af0bf24cf0 + pristine_git_object: c2ee871bb1ccf7e3e24081121a7e54f1483eee5c + src/mistralai/client/httpclient.py: + id: 3e46bde74327 + last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d + pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672 + src/mistralai/client/libraries.py: + id: d43a5f78045f + last_write_checksum: sha1:6440b3df71fe557ecba5c23768d115efd4ceb26f + pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 + src/mistralai/client/models/__init__.py: + id: e0e8dad92725 + last_write_checksum: sha1:50727667552480e8298431f5a3dcc78457c53331 + pristine_git_object: 5ef8b3f3dd9fbb32d4675f7e11808c29fc218c57 + src/mistralai/client/models/agent.py: + id: 1336849c84fb + last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 + pristine_git_object: 686a6eb84ecd27e725e3773b3f7773dddac1c10c + src/mistralai/client/models/agentaliasresponse.py: + id: 3899a98a55dd + last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397 + pristine_git_object: 
6972af2a4ae846e63d2c70b733ecd6c8370ee0cd + src/mistralai/client/models/agentconversation.py: + id: 1b7d73eddf51 + last_write_checksum: sha1:28718fb00dbe74241712b4f7a3fbce2d060f7e86 + pristine_git_object: da30c6634294cdaba459b68ca8877d867ee052fb + src/mistralai/client/models/agenthandoffdoneevent.py: + id: 82628bb5fcea + last_write_checksum: sha1:829c5a152e6d737ffd65a3b88b0b2890e6703764 + pristine_git_object: e2609e3d1fb62b132eb53112eb2bdc4ae855085f + src/mistralai/client/models/agenthandoffentry.py: + id: 5030bcaa3a07 + last_write_checksum: sha1:c9544755ad6d3a3831f8afe446c6a9a523eb5137 + pristine_git_object: f92ef2cc7310d5df94436f3067a640d3848405f0 + src/mistralai/client/models/agenthandoffstartedevent.py: + id: 2f6093d9b222 + last_write_checksum: sha1:c9f86e01497c53f3c1806dbb9fdff6e2d9993323 + pristine_git_object: 2a4023419212fec8b3f0e83d506a25b17408a8b1 + src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: 23a832f8f175 + last_write_checksum: sha1:237d6b4419615c9c26f96d49760732bd7b4617e7 + pristine_git_object: 04761ae786c35e6fa6cd5a896a5e52458cb3a5d5 + src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py: + id: 9c9947e768d3 + last_write_checksum: sha1:385faebecef8479d1a72a7ab6f15ddcc611dad87 + pristine_git_object: 291a9802a7d49108fc0d428610cb4c37b42f0796 + src/mistralai/client/models/agents_api_v1_agents_deleteop.py: + id: 95adb6768908 + last_write_checksum: sha1:f222a61a73ba2f37051fffbf2d19b3b81197d998 + pristine_git_object: 5e41fdcdbf182e993acd71603ecb8c9a14e48043 + src/mistralai/client/models/agents_api_v1_agents_get_versionop.py: + id: ef9914284afb + last_write_checksum: sha1:c99ee098f659a56cb365c280cc29de441916b48a + pristine_git_object: 941863d0f8143020200bb5566ce66d527c4369c8 + src/mistralai/client/models/agents_api_v1_agents_getop.py: + id: f5918c34f1c7 + last_write_checksum: sha1:b90285965e2aaccaf989e59b8f1db4a53ae8b31c + pristine_git_object: dd17580dd0041a979fc6c9c7349d14a3e200f5d3 + 
src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: + id: a04815e6c798 + last_write_checksum: sha1:b4b5c4e8566f1d0c68a14aba94b7ffea257fd7ce + pristine_git_object: bb1da6020386fabfbd606db9a098a0e9323ce3b0 + src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py: + id: 19e3310c3907 + last_write_checksum: sha1:6628e9ff747c579e11fa9a756cee3b11c57c476d + pristine_git_object: 54b62e90e23c1782a0b068460d6877cac3b28916 + src/mistralai/client/models/agents_api_v1_agents_listop.py: + id: 25a6460a6e19 + last_write_checksum: sha1:0abe889b85470b28917368a2b958a13303bd38f1 + pristine_git_object: 97b1c7f1a070be5e12e1a32ad56dbcfcb0f1cd68 + src/mistralai/client/models/agents_api_v1_agents_update_versionop.py: + id: 63f61b8891bf + last_write_checksum: sha1:e9046cf75e008e856f00dda8725cbb16d83cd394 + pristine_git_object: 5ab821ea413d656dc7194f3588c8987c3e720831 + src/mistralai/client/models/agents_api_v1_agents_updateop.py: + id: bb55993c932d + last_write_checksum: sha1:bc922e15651d7bb33b841d9b3ae247843b6a5426 + pristine_git_object: 69da5001007916e458cab6caf8c10073c8fbc7d6 + src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py: + id: ec00e0905f15 + last_write_checksum: sha1:d0a253c2f383241378e6fab35a38427d0a1dd827 + pristine_git_object: d257dc789cdc4f57bb91d1788335d2d49442d02f + src/mistralai/client/models/agents_api_v1_conversations_appendop.py: + id: 39c6125e850c + last_write_checksum: sha1:864ece4ddcd65075547daa1ab996ba7cfe9939fc + pristine_git_object: 61fec0834e6e05a56a7ee5c984fb0401f9c72f5c + src/mistralai/client/models/agents_api_v1_conversations_deleteop.py: + id: 0792e6abbdcb + last_write_checksum: sha1:9725fce86a52b4995a51e1995ca114c0c4b414df + pristine_git_object: 499645a77782e29db61e439060340fee787799c1 + src/mistralai/client/models/agents_api_v1_conversations_getop.py: + id: c530f2fc64d0 + last_write_checksum: sha1:241e5a07f37fa88f1e5011615b3e2b47a1aaf6a7 + pristine_git_object: 
504616abbf0c9d0595f2aae81c59e52352cee323 + src/mistralai/client/models/agents_api_v1_conversations_historyop.py: + id: 2f5ca33768aa + last_write_checksum: sha1:fccc3e1a3f48eff31463829037a440be667a7da1 + pristine_git_object: ef0a4eb084de52d4bde435ee9751aaa12e61dcc3 + src/mistralai/client/models/agents_api_v1_conversations_listop.py: + id: 936e36181d36 + last_write_checksum: sha1:e3e52cf7967b9b78099db9449cb33e3ded34d111 + pristine_git_object: 8bf66aea23f16734c1f9e03629aaf7246e4e60b4 + src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: + id: b5141764a708 + last_write_checksum: sha1:17fd503da7fb20198792c6e25f94dcc0a1e5db05 + pristine_git_object: 19978a194e2dd633fe89bcee7ceac177fcdd6629 + src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py: + id: c284a1711148 + last_write_checksum: sha1:2e462249c8ab71376c5f6179a2c033e254165f3e + pristine_git_object: 63c744498dfbdd254f2e780d90a680b10100ee63 + src/mistralai/client/models/agents_api_v1_conversations_restartop.py: + id: 3ba234e5a8fc + last_write_checksum: sha1:5dd06d300dbe8832b72d868657dc4c58f0ebaad5 + pristine_git_object: 3186d5df9000d4a62c0fbc64a601e6b709803deb + src/mistralai/client/models/agentscompletionrequest.py: + id: 3960bc4c545f + last_write_checksum: sha1:5d81a0421184ed547208e8ea7cff47b18fc00788 + pristine_git_object: 6955f6acb023fd842d9ec46a694d270a66911c0e + src/mistralai/client/models/agentscompletionstreamrequest.py: + id: 1b73f90befc2 + last_write_checksum: sha1:b46298a653359bca205b6b1975bcd1909e563dff + pristine_git_object: c2cf35522236f29ca1b9f2a438dfc79a59ca3e2a + src/mistralai/client/models/apiendpoint.py: + id: 00b34ce0a24d + last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b + pristine_git_object: a6665c1076f05c28936510c24ee7d3498d7e7a24 + src/mistralai/client/models/archivemodelresponse.py: + id: 2d22c644df64 + last_write_checksum: sha1:d0f67fd2bc5a6e8de4f2b0a5742ceb4a1f7f5ab8 + pristine_git_object: f1116850c8bf0159c4146d4973988ea5d0fe7de7 + 
src/mistralai/client/models/assistantmessage.py: + id: 2b49546e0742 + last_write_checksum: sha1:dcfa31c2aac95a0d7bd748e96bd87a5c85c0d1f6 + pristine_git_object: 26a778c70439d21b890e85f2c85dbb560e8bffef + src/mistralai/client/models/audiochunk.py: + id: ce5dce4dced2 + last_write_checksum: sha1:d3c2e28583d661a9583c40c237430a1f63ea7631 + pristine_git_object: 68866cd2c3c640cf56258f2f98b8a2385ea6fcdb + src/mistralai/client/models/audioencoding.py: + id: b14e6a50f730 + last_write_checksum: sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce + pristine_git_object: 67fec75d72845b8dda774c96767a0b233f269fe5 + src/mistralai/client/models/audioformat.py: + id: c8655712c218 + last_write_checksum: sha1:8ee7b564d106b601b6ad8a9321c27dfff421ce5e + pristine_git_object: fef87ae76b31128ebd5ced4278e274c249181c23 + src/mistralai/client/models/audiotranscriptionrequest.py: + id: e4148b4d23e7 + last_write_checksum: sha1:a6ef85be4ae24aa79c8c3fa9dcaf055e0ba9b266 + pristine_git_object: fe4c79e3427fae3e022bd936236d2934eaa76b60 + src/mistralai/client/models/audiotranscriptionrequeststream.py: + id: 33a07317a3b3 + last_write_checksum: sha1:6e648ff58a70a0a3bd63a24676122b80eba4baf7 + pristine_git_object: 2d1e9269b51d84cd8b21643fe04accd00839b013 + src/mistralai/client/models/basemodelcard.py: + id: 556ebdc33276 + last_write_checksum: sha1:e2c3d1effee5b434fea9b958c0dd54fa96143924 + pristine_git_object: 9c9e9a2045a10f4606f11ee5886a19ccf03bbf0e + src/mistralai/client/models/batcherror.py: + id: 1563e2a576ec + last_write_checksum: sha1:51c9e9a4d306c2de45dc0879ade62daed3fc2972 + pristine_git_object: 8a353cd2dc06a8c6f2db3d6b613cfdca8278f57e + src/mistralai/client/models/batchjob.py: + id: 85cd28932cc7 + last_write_checksum: sha1:532a8c6ca8546052159e5e5174cf65ce17a62f3f + pristine_git_object: 80acac336883c23b621d0dc647fef20548bf061a + src/mistralai/client/models/batchjobstatus.py: + id: 61e08cf5eea9 + last_write_checksum: sha1:78934183519948464385245cbc89efb68ac00bfb + pristine_git_object: 
bd77faa2fbed74b19a8d3884af6d43bc1b4806e0 + src/mistralai/client/models/batchrequest.py: + id: 6f36819eeb46 + last_write_checksum: sha1:b2a71163e37a9483e172dc13b6320749bee38f2f + pristine_git_object: 911a9a0554b9b8cb6dedcb3a86a06c39890b875e + src/mistralai/client/models/builtinconnectors.py: + id: 2d276ce938dc + last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272 + pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a + src/mistralai/client/models/chatclassificationrequest.py: + id: afd9cdc71834 + last_write_checksum: sha1:a29088359142ebd6409f45569168b2096014119e + pristine_git_object: cf2aa78af3ffc747d557422b83551075b83e601d + src/mistralai/client/models/chatcompletionchoice.py: + id: 7e6a512f6a04 + last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63 + pristine_git_object: 2c515f6e9a290ebab43bae41e07493e4b99afe8f + src/mistralai/client/models/chatcompletionrequest.py: + id: 9979805d8c38 + last_write_checksum: sha1:1f0390718ab06126a05e06797ef6af310ccab543 + pristine_git_object: e871bd92733ac400fdfeb2cf4f66fc32a7584103 + src/mistralai/client/models/chatcompletionresponse.py: + id: 669d996b8e82 + last_write_checksum: sha1:97f164fea881127ac82303e637b6a270e200ac5b + pristine_git_object: 7092bbc18425091d111ec998b33edc009ff0931b + src/mistralai/client/models/chatcompletionstreamrequest.py: + id: 18cb2b2415d4 + last_write_checksum: sha1:c197792ed1dd78159ab0b970f8f76087ff2c4d6b + pristine_git_object: b7b2bff138cee9c130fa01d6157d8b6c21ea5a9c + src/mistralai/client/models/chatmoderationrequest.py: + id: 057aecb07275 + last_write_checksum: sha1:7677494c0e36ccbc201384cb587abeb852a1a924 + pristine_git_object: 228e7d26b8b172c3e11f01d4f260bf6e5195b318 + src/mistralai/client/models/checkpoint.py: + id: 1a530d3674d8 + last_write_checksum: sha1:418f08c61b64fa7ffb053c6f5912e211acab1330 + pristine_git_object: c24e433eb4787146620fb48b6d301f51a4db5067 + src/mistralai/client/models/classificationrequest.py: + id: 6942fe3de24a + last_write_checksum: 
sha1:7bd416d4b0e083efbf9324107263027140702ddb + pristine_git_object: 25b6941355cb9629abb9c0f09fb6fd191c56ffa6 + src/mistralai/client/models/classificationresponse.py: + id: eaf279db1109 + last_write_checksum: sha1:64522aa2b0970e86a0133348411592f95163f374 + pristine_git_object: d2f09f430c4bca39ea9e5423b7d604ea4016fc70 + src/mistralai/client/models/classificationtargetresult.py: + id: 2445f12b2a57 + last_write_checksum: sha1:2b8b9aeadee3b8ffe21efd1e0c842f9094c4ecc7 + pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c + src/mistralai/client/models/classifierfinetunedmodel.py: + id: 5a9a7a0153c8 + last_write_checksum: sha1:853bf1b3b941ec3aebeb17ac2caf38fa0dd094de + pristine_git_object: fbcf5892d7f0a3ed8b3872d71dd95ed3a25463d1 + src/mistralai/client/models/classifierfinetuningjob.py: + id: a244d5f2afc5 + last_write_checksum: sha1:ceb13935702275025284bb77aa8bf5ccf926e19c + pristine_git_object: fb160cf8e16a1b4899f8bb2803b18ba1f55232ce + src/mistralai/client/models/classifierfinetuningjobdetails.py: + id: 75c5dee8df2e + last_write_checksum: sha1:6b3f2f7ca3bd4e089591f5f9c59b7e28a00447f8 + pristine_git_object: 5d73f55ee0f1321fdeeb4db1971e144953e8e27f + src/mistralai/client/models/classifiertarget.py: + id: 2177d51d9dcf + last_write_checksum: sha1:c801dacc31e2d7682285a9a41d8ef38fa2e38fb9 + pristine_git_object: 4d66d789a42a0bc8762998161f1ad801bd8d96d4 + src/mistralai/client/models/classifiertargetresult.py: + id: 19c343844888 + last_write_checksum: sha1:3f5b37de3585cb38a3e41f0ee49dc4b5a33bf925 + pristine_git_object: 8ce7c0ca167b38ebaf1e5fc6393ab56d9f142cfa + src/mistralai/client/models/classifiertrainingparameters.py: + id: 4000b05e3b8d + last_write_checksum: sha1:d7ce2f1017463c52856b973d696c9abecf5f79e3 + pristine_git_object: 14fa4926f8b5b62aa6b5d8864c40d5acf66e7b15 + src/mistralai/client/models/codeinterpretertool.py: + id: 950cd8f4ad49 + last_write_checksum: sha1:8c3d91805d6c5f5cc9d249216694781faf15ea68 + pristine_git_object: 
ce14265f6d312c3da52014d2a058b6a730d5c980 + src/mistralai/client/models/completionargs.py: + id: 3db008bcddca + last_write_checksum: sha1:e3d36235610c0546d8a2f2bb0a1db0f953747d88 + pristine_git_object: ab5cf5ff2d4df92d00664803f9274696ae80216d + src/mistralai/client/models/completionargsstop.py: + id: 5f339214501d + last_write_checksum: sha1:744878976d33423327ea257defeff62073dad920 + pristine_git_object: 39c858e66380044e11d3c7fd705334d130f39dea + src/mistralai/client/models/completionchunk.py: + id: d786b44926f4 + last_write_checksum: sha1:15f1b57b696b46bf6986c8f1a53d6bbf8d2351e2 + pristine_git_object: 5fd6c173ef29fb9bf2f570e0c2300268221e1ad3 + src/mistralai/client/models/completionevent.py: + id: c68817e7e190 + last_write_checksum: sha1:dc43ac751e4e9d9006b548e4374a5ec44729eea4 + pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f + src/mistralai/client/models/completionfinetunedmodel.py: + id: f08c10d149f5 + last_write_checksum: sha1:5fbd8c5475c250cbed1c2d2f47de372e8e92b128 + pristine_git_object: 54a1c1656aea1954288e9144670c939e29a83c47 + src/mistralai/client/models/completionfinetuningjob.py: + id: c242237efe9b + last_write_checksum: sha1:e4352be2411c7026c054a6fe380b87242183d4e4 + pristine_git_object: 1bf0a730c389be30bac2acfa17ffc6b5891e4918 + src/mistralai/client/models/completionfinetuningjobdetails.py: + id: e8379265af48 + last_write_checksum: sha1:b11c9bdc161da6a5cbd9f35f4bc5b51f0f3cea9c + pristine_git_object: cb7870219b261e260feceb6109088b0bbf8a6408 + src/mistralai/client/models/completionresponsestreamchoice.py: + id: 5969a6bc07f3 + last_write_checksum: sha1:59730cdaeeb3e95f4d38f63c34a4e491f40e6010 + pristine_git_object: a52ae892fcaafe54918160d055ee2badac31404e + src/mistralai/client/models/completiontrainingparameters.py: + id: be202ea0d5a6 + last_write_checksum: sha1:1a797019770795edcd911ff5b3580bedb83c05f4 + pristine_git_object: ca50a7ad521b46f275dd3a39c98911f13ee527c8 + src/mistralai/client/models/contentchunk.py: + id: c007f5ee0325 + 
last_write_checksum: sha1:b921b03b4c1e300b0e3f51ea9eadd4d7c4b7a0ea + pristine_git_object: e3de7591a089a3739af17108cecdc2d4240f10bf + src/mistralai/client/models/conversationappendrequest.py: + id: 81ce529e0865 + last_write_checksum: sha1:bdae860241893ec3ab3f22bd57c45dede2927da3 + pristine_git_object: 386714fd6dcccff8abb2247d7474949d9e8e79f8 + src/mistralai/client/models/conversationappendstreamrequest.py: + id: 27ada745e6ad + last_write_checksum: sha1:0a563cb146c4806ee6a133d10e7af8839e6f38dd + pristine_git_object: 32f6b148c647d3bac8edada3b941c51c17d78901 + src/mistralai/client/models/conversationevents.py: + id: 8c8b08d853f6 + last_write_checksum: sha1:2eedde1ecf31061fb13de0b1bdc9ea311897b570 + pristine_git_object: 17812983f3aee3e675d44f46ca1b741315c2139a + src/mistralai/client/models/conversationhistory.py: + id: 60a51ff1682b + last_write_checksum: sha1:8984a0b12766e350022796a44baf6aac4c93f79b + pristine_git_object: ceef115b70ff02da05ac97571a177edf5b5f6cf6 + src/mistralai/client/models/conversationinputs.py: + id: 711b769f2c40 + last_write_checksum: sha1:5fc688af61d6a49ede9c9709069f3db79f4dc615 + pristine_git_object: 7ce3ffc3772926a259d714b13bfc4ee4e518f8f7 + src/mistralai/client/models/conversationmessages.py: + id: 011c39501c26 + last_write_checksum: sha1:95e3abe55199f2118e6fb7e5d8520af6a929449a + pristine_git_object: 84664b62337dcdc408bb01e0494fa598e6a86832 + src/mistralai/client/models/conversationrequest.py: + id: 58e3ae67f149 + last_write_checksum: sha1:f7a67082e06c1789f4c6a4c56bfef5f21cce5034 + pristine_git_object: 83d599ebf984f1df2390d97dbe651881f7dee0e2 + src/mistralai/client/models/conversationresponse.py: + id: ad7a8472c7bf + last_write_checksum: sha1:99148d75abcb18c91ba0a801174461346508f5fb + pristine_git_object: f6c10969a931eaf1a4667b0fcff3765f57658b15 + src/mistralai/client/models/conversationrestartrequest.py: + id: 681d90d50514 + last_write_checksum: sha1:99123cee7c54f44c02b56111305af399143b4e5a + pristine_git_object: 
7ae16aff4de36a91093d3021b66283e657b00897 + src/mistralai/client/models/conversationrestartstreamrequest.py: + id: 521c2b5bfb2b + last_write_checksum: sha1:abfd14652b4785c36de84a59593b55f7a6a2d613 + pristine_git_object: 0e247261d997ac3d8ff0155ba54cc4cafe9ac65a + src/mistralai/client/models/conversationstreamrequest.py: + id: 58d633507527 + last_write_checksum: sha1:7dc25a12979f4082ed7d7e37584bb9c30297f196 + pristine_git_object: a20dccae1a60753ed95f59da0df78c204c19d515 + src/mistralai/client/models/conversationthinkchunk.py: + id: 77e59cde5c0f + last_write_checksum: sha1:5db067661a5d4b0c13db92ad93da1aab9e0e7a34 + pristine_git_object: e0e172e3edbe46c000e82e712c135b96a65312e9 + src/mistralai/client/models/conversationusageinfo.py: + id: 6685e3b50b50 + last_write_checksum: sha1:3e0489836936a7a77fa3b41adde1eb459ecd176d + pristine_git_object: 1e80f89ee4f7a3d464df2bf39990b467029e86c1 + src/mistralai/client/models/createagentrequest.py: + id: 442629bd914b + last_write_checksum: sha1:273dde9338cc1eb166ee40f4c6215f90cae908ab + pristine_git_object: 54b09880eefe348d2e003ed1b238b67cb58b8e34 + src/mistralai/client/models/createbatchjobrequest.py: + id: 56e24cd24e98 + last_write_checksum: sha1:e648017622cd6e860cb15e5dd2b29bf9f2a00572 + pristine_git_object: 9a901fefee0ea6a825274af6fd0aa5775a61c521 + src/mistralai/client/models/createfileresponse.py: + id: fea5e4832dcc + last_write_checksum: sha1:b7f3ba95a09a3225eae80b53152fe2b7d3806fbe + pristine_git_object: 768212803bc3535ac8a27a9c0d48f147e3d536b7 + src/mistralai/client/models/createfinetuningjobrequest.py: + id: c60d2a45d66b + last_write_checksum: sha1:2e8e608140860bba9ecfa9498d61cf807f96680a + pristine_git_object: e328d944ce2a71ffbec027965d31075070647dbc + src/mistralai/client/models/createlibraryrequest.py: + id: 1c489bec2f53 + last_write_checksum: sha1:45fa65be82712ce99304027c88f953f0932bdae4 + pristine_git_object: 58874e014275b06ce19d145aaa34a48d11ca0950 + 
src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: + id: 767aba526e43 + last_write_checksum: sha1:73568f2f450bf9c23aca3649372a92e1b9a2fc54 + pristine_git_object: 199614f53501f34088cb112d6fe1114e1e588d8a + src/mistralai/client/models/deletefileresponse.py: + id: 3ee464763a32 + last_write_checksum: sha1:2c0df66fc8c4384d50e54ac03577da3da2997cf5 + pristine_git_object: ffd0e0d015e38e5f6113da036ebeba98441444f4 + src/mistralai/client/models/deletemodelout.py: + id: ef6a1671c739 + last_write_checksum: sha1:d67ac7c3fa143be40c74455c7206c94bfb5a2134 + pristine_git_object: fa0c20a419c59b8fc168c150b28d703398ea7f40 + src/mistralai/client/models/deltamessage.py: + id: 68f53d67a140 + last_write_checksum: sha1:b18350de03a8685bea5ac52e1441415b5e58bdf4 + pristine_git_object: d9fa230e93d4e0886f21c836cf3813855eb8f9fd + src/mistralai/client/models/document.py: + id: fbbf7428328c + last_write_checksum: sha1:2a5a28c54f0aec50059b6badc1001b1cd120e7d3 + pristine_git_object: 31eebbd1a7d7fdcb498259837c533bfc8008a6f9 + src/mistralai/client/models/documentlibrarytool.py: + id: 3eb3c218f457 + last_write_checksum: sha1:d03a6136192b56778bd739d834d9bdc80a09cc23 + pristine_git_object: 642c3202b11c5bb8a2b41cf8ae0fe43f73aa2a81 + src/mistralai/client/models/documenttextcontent.py: + id: e730005e44cb + last_write_checksum: sha1:c86f4b15e8fda1cd5c173da01462342cd22b7286 + pristine_git_object: b6904cb4267347b62a457a01b91a391500326da9 + src/mistralai/client/models/documenturlchunk.py: + id: 4309807f6048 + last_write_checksum: sha1:33cdaccb3a4f231730c7fa1db9f338a71e6311b2 + pristine_git_object: 43444d98b8b7fb430f9c33562c35072d9c79a263 + src/mistralai/client/models/embeddingdtype.py: + id: 77f9526a78df + last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 + pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb + src/mistralai/client/models/embeddingrequest.py: + id: eadbe3f9040c + last_write_checksum: sha1:e36282eb015b782804b4bdf3d18b596607b020fd + 
pristine_git_object: 15950590fec8b82a4fb28d69009a6f6cfb83c9ee + src/mistralai/client/models/embeddingresponse.py: + id: f7d790e84b65 + last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 + pristine_git_object: 6ffd68941f32f396998df9dded14ff8365926608 + src/mistralai/client/models/embeddingresponsedata.py: + id: 6d6ead6f3803 + last_write_checksum: sha1:ba5f38ee6e2b0436532229da01ba79ee49c20d12 + pristine_git_object: 098cfae06eae6a92830b4b5a26985f5d5950e512 + src/mistralai/client/models/encodingformat.py: + id: b51ec296cc92 + last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7 + pristine_git_object: 4a39d0295f9069ae9f749cf21dab450eaf145d19 + src/mistralai/client/models/entitytype.py: + id: 62d6a6a13288 + last_write_checksum: sha1:015e2db9e8e5a3e4ce58442ccedaf86c66239dde + pristine_git_object: 56d82cbed237f32a8b00cfee4042dfe3e7053bcb + src/mistralai/client/models/event.py: + id: e5a68ac2dd57 + last_write_checksum: sha1:8ed848fe2e74c7f18ee8f4dcba39ad1c951c16d2 + pristine_git_object: c40ae2b1a1b8131a90c637e3268872b97b22683e + src/mistralai/client/models/file.py: + id: f972c39edfcf + last_write_checksum: sha1:609381a40a4bfdda2e7e750a848cd2bb38d6ac0f + pristine_git_object: 1b0ea1d4a288d9723dcdd7cfda99d49c5cbd9e7c + src/mistralai/client/models/filechunk.py: + id: ff3c2d33ab1e + last_write_checksum: sha1:d7561c39252b81007a8e079edb4f23989ffd510e + pristine_git_object: 5c8d2646dc0d5c732828bdd81c5a58e12fa92a42 + src/mistralai/client/models/filepurpose.py: + id: a11e7f9f2d45 + last_write_checksum: sha1:8b167c02f9f33e32d5fd1c6de894693924f4d940 + pristine_git_object: 49a5568ff82ad4a85e15c8de911e8d6c98dcd396 + src/mistralai/client/models/files_api_routes_delete_fileop.py: + id: 2f385cc6138f + last_write_checksum: sha1:ccfd3ff64635cfd511f49c5e02a6f1860c479966 + pristine_git_object: eaba274b9dd94d6cf729325316b3e3e9b3834566 + src/mistralai/client/models/files_api_routes_download_fileop.py: + id: 8184ee3577c3 + last_write_checksum: 
sha1:81058ede2a5eb333b54561f99ed7878082c0f411 + pristine_git_object: 83de8e73a3d50917e4a41bb92a828a10e646a632 + src/mistralai/client/models/files_api_routes_get_signed_urlop.py: + id: 0a1a18c6431e + last_write_checksum: sha1:ef4908b9d2e43c0256d25a5aa533c5bdc1205113 + pristine_git_object: 64cd6ac57b4f2de70403e11062307a8d8d5d94e7 + src/mistralai/client/models/files_api_routes_list_filesop.py: + id: b2e92f2a29b4 + last_write_checksum: sha1:71e67fc63f0df28c534d4bd03a6464ae88959dc2 + pristine_git_object: b03e2f886ce02d4beabca150302a924ae63ad507 + src/mistralai/client/models/files_api_routes_retrieve_fileop.py: + id: 5d5dbb8d5f7a + last_write_checksum: sha1:d451d8d2b32f412158a074919cca1a72f79940cb + pristine_git_object: 5f8de05f1bba07517dc2ee33a4f05122503b54b5 + src/mistralai/client/models/files_api_routes_upload_fileop.py: + id: f13b84de6fa7 + last_write_checksum: sha1:d38a86b9e7d338278e14c68756654d85bc330070 + pristine_git_object: 54ff4e4951a58e13993be0f5d2c16b0cb11c0978 + src/mistralai/client/models/fileschema.py: + id: 19cde41ca32a + last_write_checksum: sha1:0b3acb889a2c70998da4076e2f4eef3698e8b117 + pristine_git_object: e99066a9eb19daebcf29f356225635a297c444e1 + src/mistralai/client/models/fimcompletionrequest.py: + id: cf3558adc3ab + last_write_checksum: sha1:20bca1f6a0ab6e84f48b6e332f0c3242da84ae45 + pristine_git_object: ea877213d1abe4811fee188eb7a60ccf1bb51f18 + src/mistralai/client/models/fimcompletionresponse.py: + id: b860d2ba771e + last_write_checksum: sha1:dffd5a7005999340f57eaa94e17b2c82ddc7fd90 + pristine_git_object: 1345a116b7855ab4b824cf0369c0a5281e44ea97 + src/mistralai/client/models/fimcompletionstreamrequest.py: + id: 1d1ee09f1913 + last_write_checksum: sha1:aa8313ecdd852034aaf6ec23dc3f04f7ef8e28e5 + pristine_git_object: e80efc095feb2e2df87f6d3c3f9c56b6cbf347b3 + src/mistralai/client/models/finetuneablemodeltype.py: + id: 05e097395df3 + last_write_checksum: sha1:daf4cd1869da582981023dea1074268da071e16a + pristine_git_object: 
7b924bd7abc596f0607a513eee30e98cbf7ab57a + src/mistralai/client/models/finetunedmodelcapabilities.py: + id: 475c805eab95 + last_write_checksum: sha1:5919e48a6778f1a2360ce090d05b41b1bf33253f + pristine_git_object: 2f4cca0b8c0e3e379f5c2aa67953f2e55757f68d + src/mistralai/client/models/ftclassifierlossfunction.py: + id: d21e2a36ab1f + last_write_checksum: sha1:ca90e2f1cd0b9054293bea304be0867c93f7fac2 + pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af + src/mistralai/client/models/ftmodelcard.py: + id: c4f15eed2ca2 + last_write_checksum: sha1:b1b36ff994bcadd8c917880333627fd05976c991 + pristine_git_object: 2c26ff2f66faa55dc5a5a1743720e8f3f5d4d0f1 + src/mistralai/client/models/function.py: + id: 32275a9d8fee + last_write_checksum: sha1:ca24a512de22787932d7f4af005699621926d6c0 + pristine_git_object: 1da1dcc9b637d0a5b0fbb7cf2761f6d01eb3068f + src/mistralai/client/models/functioncall.py: + id: 393fca552632 + last_write_checksum: sha1:6e96e9abaa9b7625a9a30e376c31b596ee9defcb + pristine_git_object: 527c3ad408e1e1ccfe6301a8860e7f751e1d312d + src/mistralai/client/models/functioncallentry.py: + id: cd058446c0aa + last_write_checksum: sha1:776f397d17f946bae2929998f14d991a1ccc99e0 + pristine_git_object: d05fad856729a76dd24f8aa4d050f8381e51ed6a + src/mistralai/client/models/functioncallentryarguments.py: + id: 3df3767a7b93 + last_write_checksum: sha1:9858feba8f7f01017f10477a77dec851a1d06e55 + pristine_git_object: afe81b24e131a8ef879ee7f140271aa762b8ed2f + src/mistralai/client/models/functioncallevent.py: + id: 23b120b8f122 + last_write_checksum: sha1:62b5b94df4e5b6f945ead78871cdbfceb6cd40cf + pristine_git_object: 849eed76d08524e5e4d1e7cc1c3fa04386f5ef75 + src/mistralai/client/models/functionname.py: + id: 000acafdb0c0 + last_write_checksum: sha1:4145b7b817b712b85dcbedb309416c7ba72d827e + pristine_git_object: 07d98a0e65ccbcba330fb39c7f23e26d3ffc833c + src/mistralai/client/models/functionresultentry.py: + id: 213df39bd5e6 + last_write_checksum: 
sha1:3aa6834bf2beda061ac772a0a8a4d7ed5ad942a0 + pristine_git_object: 01e2e36fc0a9de6a2b06a4205004992baf0f9e43 + src/mistralai/client/models/functiontool.py: + id: 2e9ef5800117 + last_write_checksum: sha1:bce744d77a3dac92d4776a37be497311674bdc7d + pristine_git_object: eae872643c85115a825c2feda11d9a6c12a06b99 + src/mistralai/client/models/getfileresponse.py: + id: 81919086e371 + last_write_checksum: sha1:fc0232e54c0de355058c5bd82e424953b1659b56 + pristine_git_object: f625c153799dcd38e4990504d48371112b65cd15 + src/mistralai/client/models/getsignedurlresponse.py: + id: cee4e4197372 + last_write_checksum: sha1:ab9adbc06e7f02e791dc549ad1850ce1b1a250a7 + pristine_git_object: 4ba95894f2b89719fa58e7e397c28014dbd00316 + src/mistralai/client/models/githubrepository.py: + id: 4bc83ce18378 + last_write_checksum: sha1:21aa04bc426158ccbe1ded3bc65b46e6869e897d + pristine_git_object: 84b01078c2192de5d6668a6943d416a2ff30db5f + src/mistralai/client/models/githubrepositoryin.py: + id: eef26fbd2876 + last_write_checksum: sha1:18bd07155fff4b99d114353fee95e6bd828aeacd + pristine_git_object: 38bcc2087630f2fd4e9e5fa149449c32e21fdb07 + src/mistralai/client/models/imagedetail.py: + id: c1084b549abb + last_write_checksum: sha1:375db5c8fa87712dc37e46d0bf72283ae6cd6400 + pristine_git_object: 1982d357277a92fc7ebea3b99146116596d99c78 + src/mistralai/client/models/imagegenerationtool.py: + id: e1532275faa0 + last_write_checksum: sha1:88a1347876f69960dc33f8e2cb9929ab1a90a224 + pristine_git_object: c1789b18028156ae683d0323e65e47a43694570f + src/mistralai/client/models/imageurl.py: + id: e4bbf5881fbf + last_write_checksum: sha1:28ef2509fdb489ecf379b60e883e6957aebd2797 + pristine_git_object: ac1030f5d61144e393b2aa9f3ffea893faabb1f7 + src/mistralai/client/models/imageurlchunk.py: + id: 746fde62f637 + last_write_checksum: sha1:0ac388d25cae5348ffb3821706c3a8b64e716ff5 + pristine_git_object: 7134b46e7428cee52eda859cb78387c99f7e1f5a + src/mistralai/client/models/inputentries.py: + id: 44727997dacb + 
last_write_checksum: sha1:9e2a776be59c5043ea4179a60ac082faf064cc3d + pristine_git_object: e2da5a80aea121d18e2232f302ad73f63b4fc050 + src/mistralai/client/models/inputs.py: + id: 84a8007518c7 + last_write_checksum: sha1:d067587b5395529fbd638741f20b80edb2848e39 + pristine_git_object: 9ecd7f484ea306b91a9ebf038a0addd80ccd57c4 + src/mistralai/client/models/instructrequest.py: + id: 6d3ad9f896c7 + last_write_checksum: sha1:b56a77442b50b50151adedaa5ec356dc96c56428 + pristine_git_object: e5f9cccf174d8e73c42e8ee4aa294b43e1ad6cf5 + src/mistralai/client/models/jobmetadata.py: + id: cfbdde7fc0a2 + last_write_checksum: sha1:e1b180a47ca888d0fd4cbc34b62000d3ac86c2b5 + pristine_git_object: f6e96fa104e7a6c8ce9a94538a3d00167a2ae341 + src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: b56cb6c17c95 + last_write_checksum: sha1:21b5794f110c53691654d7195201f9a4b7793f21 + pristine_git_object: de2e63472ac53809cfeae200bd7d2f3dcbb70034 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 36b5a6b3ceee + last_write_checksum: sha1:b41862f037d74bbdc44fb4df5f65cd402a16703b + pristine_git_object: d779e1d96c359b0d548d5dee17c06ae2a505cf47 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: d8f0af99c94d + last_write_checksum: sha1:a50885f97cfd4d38bc3e3b0746c88bd602b88f94 + pristine_git_object: 89ac3c933347497b6fb1ec26fecb485802ef85fc + src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: + id: 34f89d2af0ec + last_write_checksum: sha1:3d5242f757ee9be10963af9cd5d47824fc83c71a + pristine_git_object: 9fa99837dda7e9413d3a05822cd17107c5fae51d + src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: d175c6e32ecb + last_write_checksum: sha1:515b7737cf8262243ee6175e297714125f3962bc + pristine_git_object: 56fa534044522f27fb26ef4820d10f22752134ea + src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: 81651291187a + 
last_write_checksum: sha1:19a0707e2f73b0184959d7c710a170650fa1767a + pristine_git_object: db857f7d6cc77057491e4b968798f730228b09bc + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: d910fd8fe2d6 + last_write_checksum: sha1:52704f01d7388a8b62d59b6f7cd94fcb7d067ebf + pristine_git_object: ddd9c1891356a7c272e0244a9aea3d3d6b2d00d6 + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: cf43028824bf + last_write_checksum: sha1:36082bde6f3d932c66178729533e2a69040fdeab + pristine_git_object: ec80a158f45061b122f84ebaff89ae82ef8d98ef + src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: e7ff4a4a4edb + last_write_checksum: sha1:8cbfc309c09df806ad7d130004b4e1c2b89ede0a + pristine_git_object: cd25fa04f29dd544f01f3620b31d1c54c86addbb + src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: 7cc1c80335a9 + last_write_checksum: sha1:f66c16423155066b844f8e89446d2acbb6e68157 + pristine_git_object: fd01fe6948613b0fffef9ac76cf1a0f9011ec5af + src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 6d9dc624aafd + last_write_checksum: sha1:fbacb171b9c75f1fe45406f542a958d10c15fae2 + pristine_git_object: 296070b426900305fe4596f03a3c9f081cdb2dcf + src/mistralai/client/models/jsonschema.py: + id: e1fc1d8a434a + last_write_checksum: sha1:d01507ab0a1f6067cbc65aaba199de340ccc68aa + pristine_git_object: dfababa694305c96f98ddebf2f09e448e737c855 + src/mistralai/client/models/legacyjobmetadata.py: + id: 0330b8930f65 + last_write_checksum: sha1:3c2f669a05cc01227f62d6a8da1840d9c458d52f + pristine_git_object: 5757675895b3c56d8aa7c174deb08567e596ecf8 + src/mistralai/client/models/libraries_delete_v1op.py: + id: b2e8bbd19baa + last_write_checksum: sha1:ba41496bc99040f7598659c5b037b955b7f6d385 + pristine_git_object: 893ab53b11672edd9cde175e68a80d89ff949cb6 + src/mistralai/client/models/libraries_documents_delete_v1op.py: 
+ id: 81eb34382a3d + last_write_checksum: sha1:66d1c6ec5e2535b0db72a3beac65b25a1f2336d7 + pristine_git_object: 0495832efba33314f3cd28fe62759c6dac5ca706 + src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: a7417ebd6040 + last_write_checksum: sha1:030ca9fb7e10396e6b743ee644fe1a734e1df1f0 + pristine_git_object: 186baaed8346d106272fea2e4826587634b061bc + src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: + id: d4b7b47913ba + last_write_checksum: sha1:fdad7a6d3ae9a9c69009caf8207b284835675a9a + pristine_git_object: ebcf85d77ed6982d510ae95a6971e1d4b3ad56ca + src/mistralai/client/models/libraries_documents_get_status_v1op.py: + id: f314f73e909c + last_write_checksum: sha1:11d463eb328a1133658e8ff92340edc7f75923e4 + pristine_git_object: 1f4847874cdeff26caaf5fd16e0f8382834ecb2b + src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: + id: 1ca4e0c41321 + last_write_checksum: sha1:26133a83bf0ef063c78069da1bbb96d58f44f30c + pristine_git_object: e0508d66fce682ed20a029604897137940689327 + src/mistralai/client/models/libraries_documents_get_v1op.py: + id: 26ff35f0c69d + last_write_checksum: sha1:e87e56e8fb9f7c11d61c805362db4755a81186b9 + pristine_git_object: 857dfbe60c57af8b0fa6655a049ed336d70fb941 + src/mistralai/client/models/libraries_documents_list_v1op.py: + id: 756f26de3cbe + last_write_checksum: sha1:5a1a9e025dc7a1fedaa5199d396a73c4986d4113 + pristine_git_object: da7d793b65139a3894b077a9665b392e8a44e8a2 + src/mistralai/client/models/libraries_documents_reprocess_v1op.py: + id: dbbeb02fc336 + last_write_checksum: sha1:bd5013cb1581dd13642ce7edf1e701f5b0c967c4 + pristine_git_object: a2f9ba2a0465fb3a8eb3b9afbb41d813de348656 + src/mistralai/client/models/libraries_documents_update_v1op.py: + id: 734ba6c19f5f + last_write_checksum: sha1:e12ca003680ff17523fe09438cd8f71d00ea081e + pristine_git_object: 7ad4231f72901b675d9af67c33364592c86be5ab + 
src/mistralai/client/models/libraries_documents_upload_v1op.py: + id: "744466971862" + last_write_checksum: sha1:9691ac41ecf986c9ccfad81423d367f96b10f4b7 + pristine_git_object: 388633d1c7e906803b711ef2bbf37656624515a9 + src/mistralai/client/models/libraries_get_v1op.py: + id: d493f39e7ebb + last_write_checksum: sha1:25b3c2c1040cd73ebd6b988b8b27708831affefd + pristine_git_object: 7a51d6053aa2cf2e6524a80487fe9549eec3dfa1 + src/mistralai/client/models/libraries_share_create_v1op.py: + id: feaacfd46dd3 + last_write_checksum: sha1:72e07fb60edbe1989865ba2ac90349edeb183f7e + pristine_git_object: 00ea74824b2efc4150d2e547e2eee416e5f6f2ee + src/mistralai/client/models/libraries_share_delete_v1op.py: + id: 7f3a679ca384 + last_write_checksum: sha1:897857c11cf0c14a0a81ef122dec4395dc16c0ce + pristine_git_object: eca3f86a6135e702f8cb6412a5f215dac2335a8f + src/mistralai/client/models/libraries_share_list_v1op.py: + id: 8f0af379bf1c + last_write_checksum: sha1:d27e0360c504576c315350fc226d371da455a598 + pristine_git_object: 895a259059283a17cc7558e3cc03022e2d4dd259 + src/mistralai/client/models/libraries_update_v1op.py: + id: 92c8d4132252 + last_write_checksum: sha1:a252f68e65cdb47e27d7059f256381daf2847344 + pristine_git_object: 54b0ab708c665ccb841b1c8d0f2748c390850506 + src/mistralai/client/models/library.py: + id: 028a34b08f9c + last_write_checksum: sha1:65f02f963a0540385681b88c7c7fba98d0d704f4 + pristine_git_object: 1953b6fbc6d7ad245ccacd9d665fb29853b00af7 + src/mistralai/client/models/listbatchjobsresponse.py: + id: 99d94c86a871 + last_write_checksum: sha1:7530be5f80a0756527be94758e800e8118e53210 + pristine_git_object: 35a348a1160dcf6d82d58c70cea07e11730359fb + src/mistralai/client/models/listdocumentsresponse.py: + id: f593d8e66833 + last_write_checksum: sha1:0d842168856056ff681b2a1c36b87df8e0d96570 + pristine_git_object: c48b8c051ad0d1fb4aed8396697e57e782be5a40 + src/mistralai/client/models/listfilesresponse.py: + id: 85d6d24c1a19 + last_write_checksum: 
sha1:caf901685bfb6f13d707b89726aaf6e5116cd054 + pristine_git_object: 10a60126600343033a4b0511d717cac6f1924b4d + src/mistralai/client/models/listfinetuningjobsresponse.py: + id: 118e05dbfbbd + last_write_checksum: sha1:f0582740a6777039e9695d97f072b5a3c34b483e + pristine_git_object: 1e434c5986bf577e2b42cca943cc6896a83d1fa2 + src/mistralai/client/models/listlibrariesresponse.py: + id: df556a618365 + last_write_checksum: sha1:55afb46b1fa797bc46574e5256cd063574c6fcbf + pristine_git_object: 337fe105731d8f3ced1f8f1299ff4081b9d5bfbe + src/mistralai/client/models/listsharingout.py: + id: ee708a7ccdad + last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 + pristine_git_object: 443ad0d6a275c1c8bae4adda3e67621b068c0412 + src/mistralai/client/models/messageentries.py: + id: e13f9009902b + last_write_checksum: sha1:43aebdc9eaecc8341298dc6b281d0d57edf4e9e6 + pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef + src/mistralai/client/models/messageinputcontentchunks.py: + id: 01025c12866a + last_write_checksum: sha1:6a0988d4e52aa2e9f7b09ae1e3266ecf9639c22b + pristine_git_object: 1e04ce24d62db6667129b35eb28dabcfd4135ea8 + src/mistralai/client/models/messageinputentry.py: + id: c0a4b5179095 + last_write_checksum: sha1:b5bad18b88c0bfbbddfdafa6dc50a09e40a6ebd7 + pristine_git_object: c948a13e3cc2071dd1b3d11c419ea61d51470152 + src/mistralai/client/models/messageoutputcontentchunks.py: + id: 2ed248515035 + last_write_checksum: sha1:dc7456e44084cba9cc6a46553fd64b1eb25f8d77 + pristine_git_object: bf455d17db16e4bc11da0ebb105a9f6ad4d63c01 + src/mistralai/client/models/messageoutputentry.py: + id: a07577d2268d + last_write_checksum: sha1:38ad03422407925087835ab888c0be40bf5fa7fa + pristine_git_object: 6a9c52ed59af1497577be2538e7141d57eea4c8f + src/mistralai/client/models/messageoutputevent.py: + id: a2bbf63615c6 + last_write_checksum: sha1:c3317ab9279c499dd7fb26f45799ca9369676ac7 + pristine_git_object: d765f4fd3c4e43c37063833368e4b21cc0bfbcf2 + 
src/mistralai/client/models/metric.py: + id: c6a65acdd1a2 + last_write_checksum: sha1:5ef7c75b278f16b412b42889ff0f2fc19d87cb7d + pristine_git_object: 1413f589f7f23991a12c1367bc6f287b5e07d4a4 + src/mistralai/client/models/mistralpromptmode.py: + id: 95abc4ec799a + last_write_checksum: sha1:a1417b987bb34daeb73ca4e015c085814e6c8ad2 + pristine_git_object: 9b91323e7545d636308064085ca16fc554eac904 + src/mistralai/client/models/modelcapabilities.py: + id: 64d8a422ea29 + last_write_checksum: sha1:0f733a45f06cb2c603b47134d999a2de4c0a7bb0 + pristine_git_object: d9293ccc163995cfe0419d05c90fe1ae8e75cf57 + src/mistralai/client/models/modelconversation.py: + id: fea0a651f888 + last_write_checksum: sha1:4c1b31d95351dea877e24bd452b32d8e22edf42e + pristine_git_object: bb33d2e0e047bc075cb7ae284958b80a5b5ee657 + src/mistralai/client/models/modellist.py: + id: 00693c7eec60 + last_write_checksum: sha1:de62fc6787f482e5df0ff0e70415f493f177b9a1 + pristine_git_object: 5fd835f24cd1098a153ebfb3e958038a183d28a7 + src/mistralai/client/models/moderationobject.py: + id: 132faad0549a + last_write_checksum: sha1:a8c1454a533e466216ef98dd198ae8959f51fa76 + pristine_git_object: e7ccd8f6f1f75704a973be7ebabc49617070c34a + src/mistralai/client/models/moderationresponse.py: + id: 06bab279cb31 + last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 + pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c + src/mistralai/client/models/ocrimageobject.py: + id: 685faeb41a80 + last_write_checksum: sha1:13f4e4d33d8fb5b0ee842695d4cc8329bd7ca382 + pristine_git_object: 365f062b5674141aad4b1601a85bec7a56db4cff + src/mistralai/client/models/ocrpagedimensions.py: + id: 02f763afbc9f + last_write_checksum: sha1:f572ed8992ba1ba4d53b705c4e8c94c85ae1290e + pristine_git_object: 847205c6c74a621dd2ee6d9eb18d1acba8395c50 + src/mistralai/client/models/ocrpageobject.py: + id: 07a099f89487 + last_write_checksum: sha1:5089ac3f02e4225d6c95cc9f05b74013694536da + pristine_git_object: 
ffc7b3b615e17a8e0d76fea4081249b143d8fe4d + src/mistralai/client/models/ocrrequest.py: + id: 36f204c64074 + last_write_checksum: sha1:9e9009dace9ff36cbff0cb8de408a1e0585147a7 + pristine_git_object: 4ad337ced23b3bdad21785b8dc3fcadbb868d4f0 + src/mistralai/client/models/ocrresponse.py: + id: 2fdfc881ca56 + last_write_checksum: sha1:f1d18dbf4cd02f3598ae574d5033c30989fa6985 + pristine_git_object: e63eed987f4eb83f3406b15cf4d840fd43528a49 + src/mistralai/client/models/ocrtableobject.py: + id: d74dd0d2ddac + last_write_checksum: sha1:492f8e4c30b61330592768b13cffcf9a9eb2c0fa + pristine_git_object: 66bb050f30790c3fc51cdca1b73e847388fe50c5 + src/mistralai/client/models/ocrusageinfo.py: + id: 272b7e1785d5 + last_write_checksum: sha1:2b37766fdff72e7ec6e052f248362f7bb3989d2c + pristine_git_object: 2ec1322b29d7fe5246b9ad355a4997222b37970f + src/mistralai/client/models/outputcontentchunks.py: + id: 9ad9741f4975 + last_write_checksum: sha1:16c43816ac7b7afd134bce1cda5bb44485d9fafe + pristine_git_object: fab7907b105cc9d9c738c5cca9c09eba9d5c4781 + src/mistralai/client/models/paginationinfo.py: + id: 48851e82d67e + last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb + pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d + src/mistralai/client/models/prediction.py: + id: 1cc842a069a5 + last_write_checksum: sha1:3ee24375eb7f00cea0c9db6eebc564ce7067f295 + pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 + src/mistralai/client/models/processingstatusout.py: + id: 3df842c4140f + last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a + pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e + src/mistralai/client/models/realtimetranscriptionerror.py: + id: 8c2267378f48 + last_write_checksum: sha1:78637de61d6fc3bc1fff8e95c0a6f5ffc1a3e111 + pristine_git_object: c661e46100752119521f63045e8ebe79105ecc01 + src/mistralai/client/models/realtimetranscriptionerrordetail.py: + id: 5bd25cdf9c7a + last_write_checksum: 
sha1:a226b10718b1fe4a661311cbd98ea3b1d1ac4163 + pristine_git_object: cec1f6eabd44ceab4e58694a0862c9c90ea2f264 + src/mistralai/client/models/realtimetranscriptioninputaudioappend.py: + id: 8b03cde6e115 + last_write_checksum: sha1:abcf48a48b077e836e2ae5647d93bd61007b9aa7 + pristine_git_object: 8156a2704bd95b74875f7a9ac17191e026f08993 + src/mistralai/client/models/realtimetranscriptioninputaudioend.py: + id: c187ba1b551d + last_write_checksum: sha1:fa96156774481ca3b98f8c0f99b3b1db01280b37 + pristine_git_object: 473eedb744141faa3447929865a76129d5e96432 + src/mistralai/client/models/realtimetranscriptioninputaudioflush.py: + id: b27b600c310e + last_write_checksum: sha1:8a8eb7de4137cf8cd810d93d984009bf8dff51c4 + pristine_git_object: 553d14c7720b3d1388901989d8160f0e3318ba56 + src/mistralai/client/models/realtimetranscriptionsession.py: + id: 02517fa5411a + last_write_checksum: sha1:eb9a23fb89e0bdb3bb6168f512488a98bd626bc1 + pristine_git_object: a74a457b1e54deb1fcd203ce5ff2c57691f16b18 + src/mistralai/client/models/realtimetranscriptionsessioncreated.py: + id: 4e3731f63a3c + last_write_checksum: sha1:6997848cf22dc90b10597eaf9f0dd966ace969af + pristine_git_object: bb96875ab913f3d6ff241a00d94a87e877637782 + src/mistralai/client/models/realtimetranscriptionsessionupdated.py: + id: 686dc4f2450f + last_write_checksum: sha1:e023fe0c8c54da644fc797c25dfeb070b6f0fd1c + pristine_git_object: fea5db4a1b956cb8253e4f147463c47958bfd989 + src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py: + id: 4e1b3fd7c5a3 + last_write_checksum: sha1:7da202e016b1d1dfc36a13ac03e3b419f0952cd2 + pristine_git_object: 07ad59a41f8a16b9c23c4e0be503a801ec0e2dd6 + src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py: + id: 7033fdb33ad4 + last_write_checksum: sha1:812f072a9effe1ce44e56094121ed10b3a83e39d + pristine_git_object: a89441e91dff4b7a78e8dd247b43243e89bf129d + src/mistralai/client/models/referencechunk.py: + id: 921acd3a224a + last_write_checksum: 
sha1:a8bff06a2a040556bce8e6212973a774bee6bd34 + pristine_git_object: e0bbae4e08275e82010080d4ee84612e01a07f81 + src/mistralai/client/models/requestsource.py: + id: 3f2774d9e609 + last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 + pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 + src/mistralai/client/models/responsedoneevent.py: + id: cf8a686bf82c + last_write_checksum: sha1:144a8bf407391948946f3f5362db78a33c45ee6c + pristine_git_object: be38fba81c08088303c4342c99ac3628c5957785 + src/mistralai/client/models/responseerrorevent.py: + id: b286d74e8724 + last_write_checksum: sha1:df3f53344624082471c795131552689510946345 + pristine_git_object: fa4d0d01c1cb7f15d6f469279c2000d2fad8f459 + src/mistralai/client/models/responseformat.py: + id: 6ab8bc8d22c0 + last_write_checksum: sha1:0ab455566c6bb0b63e2cb1f61f300266021f5954 + pristine_git_object: b2971412549cc5b95c53b93425dbd5b6503a4df7 + src/mistralai/client/models/responseformats.py: + id: c4462a05fb08 + last_write_checksum: sha1:3cb82d44a4f9df5e9a3f51867be6eab1d439d87a + pristine_git_object: 21345778ad2d41a3746292e67fec628f9ec2a84d + src/mistralai/client/models/responsestartedevent.py: + id: 24f54ee8b0f2 + last_write_checksum: sha1:f66a0a67444916e838ca9a63144fb661832b54b9 + pristine_git_object: 84abfcd9ac159b9bd9234ff015d5525d88d663f6 + src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: + id: 6fefa90ca351 + last_write_checksum: sha1:52775e73fb5c51d245362ce63672cec776e5b6bd + pristine_git_object: cd5955c1eadb8cd9d1f9ecc388c2cc17df11c885 + src/mistralai/client/models/sampletype.py: + id: a9309422fed7 + last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 + pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d + src/mistralai/client/models/security.py: + id: c2ca0e2a36b7 + last_write_checksum: sha1:d74333517caae2a1aa58517e8e935e46913bcc66 + pristine_git_object: f3b3423e850a1afa3b0fa5fa6c94f6018ff70627 + src/mistralai/client/models/shareenum.py: + id: 
a0e2a7a16bf8 + last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba + pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e + src/mistralai/client/models/sharingdelete.py: + id: f5ecce372e06 + last_write_checksum: sha1:247d793bd1ddc0ad35d010c17e5b32eba826e3a1 + pristine_git_object: 33ccd7e71b8f65d2a9329d8632b5446ca0431d0a + src/mistralai/client/models/sharingin.py: + id: e953dda09c02 + last_write_checksum: sha1:7c2b5333c634ed7889fc907edbf89c6066db5928 + pristine_git_object: 7c1a52b049db4afbd6a06b5f39966dbec4f862ba + src/mistralai/client/models/sharingout.py: + id: 0b8804effb5c + last_write_checksum: sha1:a78e4f6bf2f49ae8250787e1680b5004563b32ac + pristine_git_object: ab3679a4cbcc2826ff2672a09e4eaf4990b5c6a9 + src/mistralai/client/models/source.py: + id: fcee60a4ea0d + last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 + pristine_git_object: fcea403cdbad44299fb2178f07a63bb7e83dc033 + src/mistralai/client/models/ssetypes.py: + id: 1733e4765106 + last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf + pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6 + src/mistralai/client/models/systemmessage.py: + id: 500ef6e85ba1 + last_write_checksum: sha1:a88de3fc70adab47943f867336659b3a1a6cdae0 + pristine_git_object: 2602cd2db03cd129b42b343f2dc79ce68106ac35 + src/mistralai/client/models/systemmessagecontentchunks.py: + id: 297e8905d5af + last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f + pristine_git_object: d480a219e935aaea91adc320de0003b562c0bbb5 + src/mistralai/client/models/textchunk.py: + id: 9c96fb86a9ab + last_write_checksum: sha1:89cbb66753d7a3585ce58c70219a349f770909cc + pristine_git_object: ac9f3137dddc15e1cd10aa6385b76510e6c23e33 + src/mistralai/client/models/thinkchunk.py: + id: 294bfce193a4 + last_write_checksum: sha1:9126c530e93ae7532235d4bfa3e2b202423a0f24 + pristine_git_object: 5995e6010bfb63d0ab2ded6e0f55b7dca23f769a + src/mistralai/client/models/timestampgranularity.py: + id: 
68ddf8d702ea + last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f + pristine_git_object: 8d3773752444db865c0e2629ad9eed66eb7f2bc6 + src/mistralai/client/models/tool.py: + id: 48b4f6f50fe9 + last_write_checksum: sha1:7e33d7a0349e652b40926f6a51240b9a5c1a7dbd + pristine_git_object: 2b9965e571eeb494f8cf867818aab488198ecdb2 + src/mistralai/client/models/toolcall.py: + id: fb34a1a3f3c2 + last_write_checksum: sha1:7d0275444dd6be291c091e908a2b7f2fc536f20f + pristine_git_object: 181cec33c904535c804de06c7357bd493647cd70 + src/mistralai/client/models/toolcallconfirmation.py: + id: f2e953cfb4fe + last_write_checksum: sha1:554a2e073917ffb479efe5887c0b59a2f4967c6e + pristine_git_object: fd6eca50a7ec2f4cca2ae20958717881660e0ac5 + src/mistralai/client/models/toolchoice.py: + id: 14f7e4cc35b6 + last_write_checksum: sha1:a787827a4f4ecf5b6a7068ba94fd1ff074898b51 + pristine_git_object: cb787df1b62190319c6e9679521228af28ee7204 + src/mistralai/client/models/toolchoiceenum.py: + id: c7798801f860 + last_write_checksum: sha1:5388b2a6fad842f8e4ae79e6257b4d14c122a6ff + pristine_git_object: d66c3d07058eb87bcc3eec10de99a616b5f6638a + src/mistralai/client/models/toolconfiguration.py: + id: faec24b75066 + last_write_checksum: sha1:912c1c10e88053ae4ee44af763c9ab7c95339f5d + pristine_git_object: b903c8b6c13777b671faf5aa97994117734b3a8f + src/mistralai/client/models/toolexecutiondeltaevent.py: + id: df8f17cf3e07 + last_write_checksum: sha1:2537a6e2dffde3760a064fdf92efa6cdc117ba2b + pristine_git_object: 5a977ca6fc5bfdeadd929f18037fb5c9a9582b40 + src/mistralai/client/models/toolexecutiondoneevent.py: + id: 514fdee7d99f + last_write_checksum: sha1:d62f57105e4816e03030bc9a2a5645482ea80c55 + pristine_git_object: 1c9b0ec92d87a8559ef050a21ba309e05f6b0314 + src/mistralai/client/models/toolexecutionentry.py: + id: 76db69eebe41 + last_write_checksum: sha1:9a697fdad4178b95d7d1bd1eaee77ef948fb2d4f + pristine_git_object: 0d6f2a1305f262519ba719969c6e62ceb95e52b3 + 
src/mistralai/client/models/toolexecutionstartedevent.py: + id: 40fadb8e49a1 + last_write_checksum: sha1:9f6e43d5b2c807ca3b080ea7bd4878ba3ec2a788 + pristine_git_object: 21e5bfa8fea7fa27b7031b740f72a873760700cc + src/mistralai/client/models/toolfilechunk.py: + id: 26c8aadf416a + last_write_checksum: sha1:89bb203aa600bf6a516fbe10e1787a132de9ca5a + pristine_git_object: 0708b3ff4c4f97a0e4c4359baeedc89ef0b10278 + src/mistralai/client/models/toolmessage.py: + id: 15f1af161031 + last_write_checksum: sha1:cfa16352cf5bbcd6eedbfbf7f3002149fd989418 + pristine_git_object: 05a0ee636a4393e3ce65cc1b6e272ddf8ec79254 + src/mistralai/client/models/toolreferencechunk.py: + id: 822e9f3e70de + last_write_checksum: sha1:f5c9265e27fa2d4526e5ce50dff7f7bd641eb642 + pristine_git_object: 95454fe891dd3955121565431897c1b8f0c25083 + src/mistralai/client/models/tooltypes.py: + id: 86c3b54272fd + last_write_checksum: sha1:e90c15c1e645a5f207af0c7ac728cb0a521c6706 + pristine_git_object: e601c1967c42ef8d0c2eea98bc5c0ca722cde066 + src/mistralai/client/models/trainingfile.py: + id: 2edf9bce227d + last_write_checksum: sha1:8fd6a2560554b3c2166daff2ff1a48bb49053489 + pristine_git_object: 2faeda8bfb38c810c5d80eb17cc9928c49c7caf5 + src/mistralai/client/models/transcriptionresponse.py: + id: 60896dbc6345 + last_write_checksum: sha1:e8a318798dfe4ebd64c9d64f487f7e3e8dd05532 + pristine_git_object: 70315463ff8e01c680aa80d68bdc32a7429ddb16 + src/mistralai/client/models/transcriptionsegmentchunk.py: + id: d1e6f3bdc74b + last_write_checksum: sha1:ee56c437444cbfa7983ba950e3e166f392d208cb + pristine_git_object: b87bfc2f9de0a07d62e8cc1fe265a9c29f56f194 + src/mistralai/client/models/transcriptionstreamdone.py: + id: 066a9158ed09 + last_write_checksum: sha1:cb8ea2e34c712ef1694bd1b6a83e7eed9318b13b + pristine_git_object: e3c5016901a2400c222e5b821b5afb312af1a1e6 + src/mistralai/client/models/transcriptionstreamevents.py: + id: b50b3d74f16f + last_write_checksum: sha1:68f82eea8a0bcf1b8b65cedf9e276f34121d398b + 
pristine_git_object: 073fd99aebf6f90027a45c8ee4daa7ffeb8ee34e + src/mistralai/client/models/transcriptionstreameventtypes.py: + id: 6f71f6fbf4c5 + last_write_checksum: sha1:1d568460b1521f17dd5e551632ae4d7883a98dd3 + pristine_git_object: c74bbb7483cc3981ee3638c80c15924f3e1c20c4 + src/mistralai/client/models/transcriptionstreamlanguage.py: + id: e94333e4bc27 + last_write_checksum: sha1:d1ee93b09ca377bc29845924d53db3ccf250269d + pristine_git_object: b6c6190684eccdc3fe6ce4bc7b86f5ee6490a197 + src/mistralai/client/models/transcriptionstreamsegmentdelta.py: + id: c0a882ce57e5 + last_write_checksum: sha1:3507a0355027136e92ada0c9766277381d5dee96 + pristine_git_object: 32ef8f9b2aa34253ea10c830ae856a931306f658 + src/mistralai/client/models/transcriptionstreamtextdelta.py: + id: 6086dc081147 + last_write_checksum: sha1:968b4bc32731be6c63be3fd90eb26f4357f891a3 + pristine_git_object: 42f0ffb7f16bee4f68f9db9807aa4ec3d9ae5176 + src/mistralai/client/models/unarchivemodelresponse.py: + id: 22e2ccbb0c80 + last_write_checksum: sha1:a69d8dc8636f3326eb61892b85a9b60044b457fe + pristine_git_object: 5c75d30edaade853f085533da0f9f5de221b6e44 + src/mistralai/client/models/updateagentrequest.py: + id: 914b4b2be67a + last_write_checksum: sha1:f37178288254e905ce298befbe801fa6ba63ec0e + pristine_git_object: b751ff74396ca0e74411a7a1549c6e0b4988fc49 + src/mistralai/client/models/updatedocumentrequest.py: + id: a8cfda07d337 + last_write_checksum: sha1:c644725ae379f22550d00b42baefb511d1cc3667 + pristine_git_object: 61e696555c0654208b0d9dcd63fc475ad85297d4 + src/mistralai/client/models/updatelibraryrequest.py: + id: 51bc63885337 + last_write_checksum: sha1:622d6a7af58d2e86d7d2dd4e312883d11ce5a8a8 + pristine_git_object: 91cbf2a1c76361c9c5ee1554c80f1507ff5ee50b + src/mistralai/client/models/updatemodelrequest.py: + id: fe649967751e + last_write_checksum: sha1:dbba8a6ccbfae36ac56808742f4c05ab99dd2c6c + pristine_git_object: f685cfcce1aa3669159fec902ba78034ef3141b8 + 
src/mistralai/client/models/usageinfo.py: + id: 54adb9a3af16 + last_write_checksum: sha1:04705526057c43495284fe9c50cf7df2af7b49fd + pristine_git_object: 31cbf07e3e38df4452da320e44f3fa9aef17c196 + src/mistralai/client/models/usermessage.py: + id: cb583483acf4 + last_write_checksum: sha1:0060ee5f5fbbd78073cd56546127a021354a8072 + pristine_git_object: 63e7679246a11fe8e7a3db06e382779c05c64366 + src/mistralai/client/models/validationerror.py: + id: 15df3c7368ab + last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d + pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 + src/mistralai/client/models/wandbintegration.py: + id: 4823c1e80942 + last_write_checksum: sha1:cc0a7ce49756928f4d261375526a3498b9e4f05d + pristine_git_object: f0df2c77845b2741802730fcd4f3c5d31b7ddd8e + src/mistralai/client/models/wandbintegrationresult.py: + id: 8787b4ad5458 + last_write_checksum: sha1:6ba506e01333a3084f63fbfccb459235b6560554 + pristine_git_object: 575cbd42297f02a54542c7eda3a4cabaa28dda23 + src/mistralai/client/models/websearchpremiumtool.py: + id: bfe88af887e3 + last_write_checksum: sha1:ceb073d3b3916b2ff8f7b7e5eb01692893024d68 + pristine_git_object: 00d4a4b427331660d29513ec43e68fc7cf8afcfb + src/mistralai/client/models/websearchtool.py: + id: 26b0903423e5 + last_write_checksum: sha1:a07d7ace2d68c944c686e69053bef8d84231814b + pristine_git_object: 6871080f6279ef42a0525c1e26368baafc98fbb7 + src/mistralai/client/models_.py: + id: 1d277958a843 + last_write_checksum: sha1:b9ea906a7704aa57efe5d13ac547e502d961d3b5 + pristine_git_object: a287c413ddf48bd5ff7fc0a685e05d4bcdabb6e5 + src/mistralai/client/ocr.py: + id: 2f804a12fc62 + last_write_checksum: sha1:707d91582149e76a3109df8b1a58bfd44111a93d + pristine_git_object: a46119d1577036be57896a7ea3737ab508497e4f + src/mistralai/client/py.typed: + id: d95cd1565e33 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/client/sdk.py: 
+ id: 48edbcb38d7e + last_write_checksum: sha1:365709e35dc4e450a2c4931e75dcbd04568ab361 + pristine_git_object: 80bf25a749eb3b36035aaafa15f059bcf403ec80 + src/mistralai/client/sdkconfiguration.py: + id: b7dd68a0235e + last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e + pristine_git_object: 712e92e05c7fd3016431ec62ecb7b7789c8b7071 + src/mistralai/client/transcriptions.py: + id: 75b45780c978 + last_write_checksum: sha1:27a5b7dd6ed47b0f79b95fbb8599d439512ef344 + pristine_git_object: 7f01917d6e462cff9af75e70d32afbcc5958c7de + src/mistralai/client/types/__init__.py: + id: 000b943f821c + last_write_checksum: sha1:12a4ace69cbc63f1125eeddf901afed7cdf378b0 + pristine_git_object: cf83864312d8fed0bb9dd3ce39d373b040c36b2e + src/mistralai/client/types/basemodel.py: + id: 7ec465a1d3ff + last_write_checksum: sha1:b62a9d42d79a238399e04efbf5c02215c707efde + pristine_git_object: 4e889aa0ffbb4402e416a40fa6259334cb0a3c5c + src/mistralai/client/utils/__init__.py: + id: b69505f4b269 + last_write_checksum: sha1:98698da73839db7c258fd1afd45ccacff86c64be + pristine_git_object: 4bde281a1fd8c616d4b3529af0fcb79f57374310 + src/mistralai/client/utils/annotations.py: + id: 1ffdedfc66a2 + last_write_checksum: sha1:f86ba37de752e63076f25d53f9c54fce98d2a0bd + pristine_git_object: 4b60ab8e730e7093a064b6869c4a712b96e4aad8 + src/mistralai/client/utils/datetimes.py: + id: c40066d868c9 + last_write_checksum: sha1:412ca432d6f5a75b692a967bc6fc52e4f4eff7d5 + pristine_git_object: a2c94fac73ecbfb8acd8ed4f75692318e4f863ec + src/mistralai/client/utils/dynamic_imports.py: + id: ac9918d925c0 + last_write_checksum: sha1:93d3eac90a47a039e7a652ae120bec66be6c681a + pristine_git_object: 969f2fc71178ed2114640c8f0831f4f3acb25af8 + src/mistralai/client/utils/enums.py: + id: a0735873b5ac + last_write_checksum: sha1:fe05b6a21360b0eff1fc246e9a3ee01758521262 + pristine_git_object: d897495f053459106144501c67f2215251d52a27 + src/mistralai/client/utils/eventstreaming.py: + id: 3263d7502030 + last_write_checksum: 
sha1:24af3168dafe6b8d860cffb121fac11cd0e9d930 + pristine_git_object: 19a121529f180968f655baffbe446e5c1d6c2abb + src/mistralai/client/utils/forms.py: + id: 58842e905fce + last_write_checksum: sha1:d68ca0257e0e8bdc5cdc450f3e70a7ba789859f5 + pristine_git_object: 6facec5386675ccd5a26ff6093f98436a62fdf6b + src/mistralai/client/utils/headers.py: + id: 9066de2ead8b + last_write_checksum: sha1:bcd2f47b96bfaa54b3590c557a9267142d446be6 + pristine_git_object: 6491187230b5f11c7ff13396891ac69099a73a79 + src/mistralai/client/utils/logger.py: + id: 745023607a1f + last_write_checksum: sha1:2582e0cb889b6293c12ce9671aba6281d46bad44 + pristine_git_object: 3edad8307ea0ef38e857596a3ec11023a4af287f + src/mistralai/client/utils/metadata.py: + id: d49d535ae52c + last_write_checksum: sha1:54d300a665d3d5eafcc778a795d79347479b8337 + pristine_git_object: d46ffa59952926b7b1a842b0db2475527eda87df + src/mistralai/client/utils/queryparams.py: + id: bb77d4664844 + last_write_checksum: sha1:d02ce5b2dcc26edb7c937d75b98b70c22a5af189 + pristine_git_object: 0b78c548233f32afa2aafe0040ebb120b51532e8 + src/mistralai/client/utils/requestbodies.py: + id: 946cfcd26ee4 + last_write_checksum: sha1:8cac30839193ee0bb02975b0e225eab97adf4fd1 + pristine_git_object: 3aae69c7cf618776daec8bd46f9116b06c25e837 + src/mistralai/client/utils/retries.py: + id: 5f1a5b90423c + last_write_checksum: sha1:bbf8e376c1c801911e65e33566d3a142f46133f9 + pristine_git_object: bea1304150e77ca06185efb7db7798aaacd5e623 + src/mistralai/client/utils/security.py: + id: 1acb7c006265 + last_write_checksum: sha1:3981f6571daf28b3b553beb09a4ebeeeb6ceff14 + pristine_git_object: d8b9d8fe746babd0a87846812b1f4117d1a46de2 + src/mistralai/client/utils/serializers.py: + id: 53c57c7f29a8 + last_write_checksum: sha1:8a3a15cf273034261111f2559cacbb579e17cb1b + pristine_git_object: fbc2772dc4284775be92de6a086c1eade9376417 + src/mistralai/client/utils/unions.py: + id: d23713342634 + last_write_checksum: sha1:f814d757474f039199f501aa53cdfba97a8c6645 + 
pristine_git_object: 14ef1bd5c5abef9bd5f2a3a4ee2f79e954c67e7e + src/mistralai/client/utils/unmarshal_json_response.py: + id: b13585fc5626 + last_write_checksum: sha1:372a01f5abf034ddbe5d4a3fc68e9e397f86085a + pristine_git_object: 624433c4dd42c9fb1bfae363becc76c62e390e14 + src/mistralai/client/utils/url.py: + id: 3c6496c17510 + last_write_checksum: sha1:c64be472d29cf229f2b91102808dcb741371c227 + pristine_git_object: 27a6a3a05287ff8a4e24e379ae5d20280c2caf30 + src/mistralai/client/utils/values.py: + id: bb6ade7a7f82 + last_write_checksum: sha1:da9ce43ad241db386efd9b2f53d81eb051dd7544 + pristine_git_object: 2469a9f310a37a7170b54853715274f13d38901c examples: list_models_v1_models_get: speakeasy-default-list-models-v1-models-get: responses: "200": - application/json: {"object": "list"} + application/json: {"object": "list"} + "422": + application/json: {} + userExample: + responses: + "200": + application/json: {"object": "list"} + retrieve_model_v1_models__model_id__get: + speakeasy-default-retrieve-model-v1-models-model-id-get: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, 
"audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + delete_model_v1_models__model_id__delete: + speakeasy-default-delete-model-v1-models-model-id-delete: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + jobs_api_routes_fine_tuning_update_fine_tuned_model: + speakeasy-default-jobs-api-routes-fine-tuning-update-fine-tuned-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} + jobs_api_routes_fine_tuning_archive_fine_tuned_model: + speakeasy-default-jobs-api-routes-fine-tuning-archive-fine-tuned-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": true} + jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: + speakeasy-default-jobs-api-routes-fine-tuning-unarchive-fine-tuned-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", 
"object": "model", "archived": false} + agents_api_v1_conversations_start: + speakeasy-default-agents-api-v1-conversations-start: + requestBody: + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_list: + speakeasy-default-agents-api-v1-conversations-list: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"object": "conversation", "id": "", "created_at": "2025-11-20T22:30:47.754Z", "updated_at": "2025-08-05T08:36:20.296Z", "agent_id": ""}] + "422": + application/json: {} + agents_api_v1_conversations_get: + speakeasy-default-agents-api-v1-conversations-get: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2023-06-02T14:00:42.201Z", "updated_at": "2024-10-06T17:16:50.325Z", "agent_id": ""} + "422": + application/json: {} + agents_api_v1_conversations_append: + speakeasy-default-agents-api-v1-conversations-append: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_history: + speakeasy-default-agents-api-v1-conversations-history: + parameters: + path: + conversation_id: "" + responses: + "200": + 
application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "tool.execution", "name": "image_generation", "arguments": ""}]} + "422": + application/json: {} + agents_api_v1_conversations_messages: + speakeasy-default-agents-api-v1-conversations-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + application/json: {} + agents_api_v1_conversations_restart: + speakeasy-default-agents-api-v1-conversations-restart: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_start_stream: + speakeasy-default-agents-api-v1-conversations-start-stream: + requestBody: + application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true, "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + agents_api_v1_conversations_append_stream: + speakeasy-default-agents-api-v1-conversations-append-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + agents_api_v1_conversations_restart_stream: + speakeasy-default-agents-api-v1-conversations-restart-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"stream": true, "store": true, "handoff_execution": 
"server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "422": + application/json: {} + agents_api_v1_agents_create: + speakeasy-default-agents-api-v1-agents-create: + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "LeBaron", "name": ""} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "versions": [818563, 316961], "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_list: + speakeasy-default-agents-api-v1-agents-list: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "versions": [43153, 439473], "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z", "deployment_chat": true, "source": ""}] + "422": + application/json: {} + agents_api_v1_agents_get: + speakeasy-default-agents-api-v1-agents-get: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "versions": [845972, 878771, 621094], "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_update: + speakeasy-default-agents-api-v1-agents-update: + parameters: + path: + agent_id: "" + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model X", "name": "", "object": "agent", "id": 
"", "version": 799821, "versions": [799821, 934063], "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + agents_api_v1_agents_update_version: + speakeasy-default-agents-api-v1-agents-update-version: + parameters: + path: + agent_id: "" + query: + version: 157995 + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "versions": [], "created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + files_api_routes_upload_file: + speakeasy-default-files-api-routes-upload-file: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} + files_api_routes_list_files: + speakeasy-default-files-api-routes-list-files: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload"}], "object": "", "total": 999335} + userExample: + 
parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} + files_api_routes_retrieve_file: + speakeasy-default-files-api-routes-retrieve-file: + parameters: + path: + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "instruct", "source": "repository", "deleted": false} + userExample: + parameters: + path: + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} + files_api_routes_delete_file: + speakeasy-default-files-api-routes-delete-file: + parameters: + path: + file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + userExample: + parameters: + path: + file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} + 
files_api_routes_download_file: + speakeasy-default-files-api-routes-download-file: + parameters: + path: + file_id: "f8919994-a4a1-46b2-8b5b-06335a4300ce" + responses: + "200": + application/octet-stream: "x-file: example.file" + files_api_routes_get_signed_url: + speakeasy-default-files-api-routes-get-signed-url: + parameters: + path: + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://knotty-birdcage.net/"} + userExample: + parameters: + path: + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} + jobs_api_routes_fine_tuning_get_fine_tuning_jobs: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"object": "list", "total": 843585} + jobs_api_routes_fine_tuning_create_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: + requestBody: + application/json: {"model": "Camaro", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + responses: + "200": + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} + jobs_api_routes_fine_tuning_get_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: + parameters: + path: + job_id: "c167a961-ffca-4bcf-93ac-6169468dd389" + responses: + "200": + application/json: {"id": 
"babac92a-96fa-48c4-931c-f6f97e1bf24c", "auto_start": false, "model": "Spyder", "status": "FAILED", "created_at": 232438, "modified_at": 32259, "training_files": ["7a95c5a0-399d-4665-84c8-deab766d22dc"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 5651, "loss_function": "single_class"}]} + jobs_api_routes_fine_tuning_cancel_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: + parameters: + path: + job_id: "6188a2f6-7513-4e0f-89cc-3f8088523a49" + responses: + "200": + application/json: {"id": "770b9cc0-1ab6-44de-a816-67010644e9fb", "auto_start": false, "model": "Volt", "status": "CANCELLATION_REQUESTED", "created_at": 546404, "modified_at": 180081, "training_files": ["45e621c6-ac30-4133-b6d1-fc0d1fe24c9f"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [""], "weight": 1298.58, "loss_function": "multi_class"}]} + jobs_api_routes_fine_tuning_start_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: + parameters: + path: + job_id: "56553e4d-0679-471e-b9ac-59a77d671103" + responses: + "200": + application/json: {"id": "68ad461a-676e-47fe-a07e-15e38f5082b5", "auto_start": false, "model": "Grand Cherokee", "status": "STARTED", "created_at": 134515, "modified_at": 192651, "training_files": ["39dabc3d-15eb-49ac-a549-69973f33acee"], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + jobs_api_routes_batch_get_batch_jobs: + speakeasy-default-jobs-api-routes-batch-get-batch-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + order_by: "-created" + responses: + "200": + application/json: {"object": "list", "total": 186589} + jobs_api_routes_batch_create_batch_job: + speakeasy-default-jobs-api-routes-batch-create-batch-job: + requestBody: + 
application/json: {"endpoint": "/v1/moderations", "model": "mistral-small-latest", "timeout_hours": 24} + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} + jobs_api_routes_batch_get_batch_job: + speakeasy-default-jobs-api-routes-batch-get-batch-job: + parameters: + path: + job_id: "4017dc9f-b629-42f4-9700-8c681b9e7f0f" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} + jobs_api_routes_batch_cancel_batch_job: + speakeasy-default-jobs-api-routes-batch-cancel-batch-job: + parameters: + path: + job_id: "4fb29d1c-535b-4f0a-a1cb-2167f86da569" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + fim_completion_v1_fim_completions_post: + speakeasy-default-fim-completion-v1-fim-completions-post: + requestBody: + application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} + agents_completion_v1_agents_completions_post: + speakeasy-default-agents-completion-v1-agents-completions-post: + requestBody: + application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} + responses: + "200": + application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"role": "assistant", "content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} + stream_agents: + speakeasy-default-stream-agents: + requestBody: + application/json: {"stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} + responses: + "422": + application/json: {} + embeddings_v1_embeddings_post: + speakeasy-default-embeddings-v1-embeddings-post: + requestBody: + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "list", "model": "mistral-embed", "usage": {"prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15, "prompt_audio_seconds": null}, "data": [{"object": "embedding", "embedding": [-0.016632080078125, 0.0701904296875, 0.03143310546875, 0.01309967041015625, 0.0202789306640625], "index": 0}, {"object": "embedding", "embedding": [-0.0230560302734375, 0.039337158203125, 0.0521240234375, -0.0184783935546875, 0.034271240234375], "index": 1}]} + moderations_v1_moderations_post: + speakeasy-default-moderations-v1-moderations-post: + requestBody: + application/json: {"model": "Durango", "input": ["", ""]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Corvette", "results": [{}]} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-moderation-latest", "input": ""} + responses: + "200": + application/json: {"id": "4d71ae510af942108ef7344f903e2b88", "model": "mistral-moderation-latest", "results": [{"categories": 
{"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0011335690505802631, "hate_and_discrimination": 0.0030753696337342262, "violence_and_threats": 0.0003569706459529698, "dangerous_and_criminal_content": 0.002251847181469202, "selfharm": 0.00017952796770259738, "health": 0.0002780309587251395, "financial": 0.00008481103577651083, "law": 0.00004539786823443137, "pii": 0.0023967307060956955}}, {"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.000626334105618298, "hate_and_discrimination": 0.0013670255430042744, "violence_and_threats": 0.0002611903182696551, "dangerous_and_criminal_content": 0.0030753696337342262, "selfharm": 0.00010889690747717395, "health": 0.00015843621804378927, "financial": 0.000191104321856983, "law": 0.00004006369272246957, "pii": 0.0035936026833951473}}]} + chat_moderations_v1_chat_moderations_post: + speakeasy-default-chat-moderations-v1-chat-moderations-post: + requestBody: + application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Explorer", "results": [{}]} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"input": [{"role": "tool", "content": ""}], "model": "LeBaron"} + responses: + "200": + application/json: {"id": "352bce1a55814127a3b0bc4fb8f02a35", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, 
"law": false, "pii": false}, "category_scores": {"sexual": 0.0010322310263291001, "hate_and_discrimination": 0.001597845577634871, "violence_and_threats": 0.00020342698553577065, "dangerous_and_criminal_content": 0.0029810327105224133, "selfharm": 0.00017952796770259738, "health": 0.0002959570847451687, "financial": 0.000079673009167891, "law": 0.00004539786823443137, "pii": 0.004198795650154352}}]} + classifications_v1_classifications_post: + speakeasy-default-classifications-v1-classifications-post: + requestBody: + application/json: {"model": "mistral-moderation-latest", "input": [""]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "ATS", "results": [{}, {"key": {"scores": {"key": 2080.19}}}]} + "422": + application/json: {} + chat_classifications_v1_chat_classifications_post: + speakeasy-default-chat-classifications-v1-chat-classifications-post: + requestBody: + application/json: {"model": "Camry", "input": [{"messages": [{"role": "system", "content": ""}]}]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Altima", "results": [{}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}]} + "422": + application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"image_url": {"url": "https://measly-scrap.com"}, "type": "image_url"}} + responses: + "200": + application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, 
"bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} + libraries_list_v1: + speakeasy-default-libraries-list-v1: + responses: + "200": + application/json: {"data": [{"id": "bfc452fd-4bcb-46ec-9f68-ceea101e924d", "name": "", "created_at": "2024-01-31T13:50:47.409Z", "updated_at": "2023-04-09T15:28:24.261Z", "owner_id": "3fb92cf9-0fea-44d0-958f-16963601a1f0", "owner_type": "", "total_size": 811051, "nb_documents": 634577, "chunk_size": 502060}]} + libraries_create_v1: + speakeasy-default-libraries-create-v1: + requestBody: + application/json: {"name": ""} + responses: + "201": + application/json: {"id": "7285d921-bbab-471e-a2df-600e096d8aca", "name": "", "created_at": "2025-12-10T18:12:15.618Z", "updated_at": "2023-12-29T15:14:03.343Z", "owner_id": "d5e2af8f-c98a-479e-aece-62d79ea6bab3", "owner_type": "", "total_size": 866940, "nb_documents": 123652, "chunk_size": 274694} + "422": + application/json: {} + libraries_get_v1: + speakeasy-default-libraries-get-v1: + parameters: + path: + library_id: "d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f" + 
responses: + "200": + application/json: {"id": "24e6ac5e-61cb-4f2c-b0c0-806dfd5d8dbf", "name": "", "created_at": "2023-01-19T09:20:07.756Z", "updated_at": "2023-05-28T00:39:57.656Z", "owner_id": "546a730e-7d06-4324-a4fd-2b7ff127978c", "owner_type": "", "total_size": 191122, "nb_documents": 932135, "chunk_size": null} + "422": + application/json: {} + libraries_delete_v1: + speakeasy-default-libraries-delete-v1: + parameters: + path: + library_id: "6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a" + responses: + "200": + application/json: {"id": "04e83772-3f8e-41d3-a053-763ed9937e07", "name": "", "created_at": "2025-03-15T23:45:26.060Z", "updated_at": "2024-08-03T06:23:12.129Z", "owner_id": "f636aa46-e1d5-4df4-966b-de4af27da6db", "owner_type": "", "total_size": 268102, "nb_documents": 821714, "chunk_size": null} + "422": + application/json: {} + libraries_update_v1: + speakeasy-default-libraries-update-v1: + parameters: + path: + library_id: "e01880c3-d0b5-4a29-8b1b-abdb8ce917e4" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "c3bb20a7-df8c-4461-8cfb-9e2a978c00da", "name": "", "created_at": "2025-03-12T04:49:28.349Z", "updated_at": "2025-12-18T03:09:25.092Z", "owner_id": "734e66b8-ae70-4069-9ebb-7eb7ee3967d5", "owner_type": "", "total_size": 762363, "nb_documents": 896591, "chunk_size": 507889} + "422": + application/json: {} + libraries_documents_list_v1: + speakeasy-default-libraries-documents-list-v1: + parameters: + path: + library_id: "5c3ca4cd-62bc-4c71-ad8a-1531ae80d078" + query: + page_size: 100 + page: 0 + sort_by: "created_at" + sort_order: "desc" + responses: + "200": + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": 
"2024-09-24T04:50:43.988Z", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} "422": application/json: {} - retrieve_model_v1_models__model_id__get: - speakeasy-default-retrieve-model-v1-models-model-id-get: + libraries_documents_upload_v1: + speakeasy-default-libraries-documents-upload-v1: parameters: path: - model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + library_id: "a02150d9-5ee0-4877-b62c-28b1fcdf3b76" + requestBody: + multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} "422": application/json: {} - delete_model_v1_models__model_id__delete: - speakeasy-default-delete-model-v1-models-model-id-delete: + libraries_documents_get_v1: + speakeasy-default-libraries-documents-get-v1: parameters: path: - model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + library_id: "03d908c8-90a1-44fd-bf3a-8490fb7c9a03" + document_id: "90973aec-0508-4375-8b00-91d732414745" responses: "200": - application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", 
"mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} "422": application/json: {} - jobs_api_routes_fine_tuning_update_fine_tuned_model: - speakeasy-default-jobs-api-routes-fine-tuning-update-fine-tuned-model: + libraries_documents_update_v1: + speakeasy-default-libraries-documents-update-v1: parameters: path: - model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + library_id: "3ddd8d93-dca5-4a6d-980d-173226c35742" + document_id: "2a25e44c-b160-40ca-b5c2-b65fb2fcae34" requestBody: application/json: {} responses: "200": - application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} - jobs_api_routes_fine_tuning_archive_fine_tuned_model: - speakeasy-default-jobs-api-routes-fine-tuning-archive-fine-tuned-model: + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} + "422": + application/json: {} + libraries_documents_delete_v1: + speakeasy-default-libraries-documents-delete-v1: parameters: path: - model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + library_id: "005daae9-d42e-407d-82d7-2261c6a1496c" + document_id: "edc236b0-baff-49a9-884b-4ca36a258da4" responses: - "200": - application/json: {"id": "", 
"object": "model", "archived": true} - jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: - speakeasy-default-jobs-api-routes-fine-tuning-unarchive-fine-tuned-model: + "422": + application/json: {} + libraries_documents_get_text_content_v1: + speakeasy-default-libraries-documents-get-text-content-v1: parameters: path: - model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" - responses: - "200": - application/json: {"id": "", "object": "model", "archived": false} - agents_api_v1_conversations_start: - speakeasy-default-agents-api-v1-conversations-start: - requestBody: - application/json: {"inputs": "", "stream": false} + library_id: "1d177215-3b6b-45ba-9fa9-baf773223bec" + document_id: "60214c91-2aba-4692-a4e6-a53365de8caf" responses: "200": - application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + application/json: {"text": ""} "422": application/json: {} - agents_api_v1_conversations_list: - speakeasy-default-agents-api-v1-conversations-list: + libraries_documents_get_status_v1: + speakeasy-default-libraries-documents-get-status-v1: parameters: - query: - page: 0 - page_size: 100 + path: + library_id: "e6906f70-368f-4155-80da-c1718f01bc43" + document_id: "2c904915-d831-4e9d-a345-8ce405bcef66" responses: "200": - application/json: [{"object": "conversation", "id": "", "created_at": "2025-11-20T22:30:47.754Z", "updated_at": "2025-08-05T08:36:20.296Z", "agent_id": ""}] + application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "processing_status": ""} "422": application/json: {} - agents_api_v1_conversations_get: - speakeasy-default-agents-api-v1-conversations-get: + libraries_documents_get_signed_url_v1: + speakeasy-default-libraries-documents-get-signed-url-v1: parameters: path: - 
conversation_id: "" + library_id: "23cf6904-a602-4ee8-9f5b-8efc557c336d" + document_id: "48598486-df71-4994-acbb-1133c72efa8c" responses: "200": - application/json: {"object": "conversation", "id": "", "created_at": "2023-06-02T14:00:42.201Z", "updated_at": "2024-10-06T17:16:50.325Z", "agent_id": ""} + application/json: "https://burdensome-jellyfish.name" "422": application/json: {} - agents_api_v1_conversations_append: - speakeasy-default-agents-api-v1-conversations-append: + libraries_documents_get_extracted_text_signed_url_v1: + speakeasy-default-libraries-documents-get-extracted-text-signed-url-v1: parameters: path: - conversation_id: "" - requestBody: - application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server"} + library_id: "a6f15de3-1e82-4f95-af82-851499042ef8" + document_id: "9749d4f9-24e5-4ca2-99a3-a406863f805d" responses: "200": - application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + application/json: "https://athletic-disadvantage.info" "422": application/json: {} - agents_api_v1_conversations_history: - speakeasy-default-agents-api-v1-conversations-history: + libraries_documents_reprocess_v1: + speakeasy-default-libraries-documents-reprocess-v1: parameters: path: - conversation_id: "" + library_id: "51b29371-de8f-4ba4-932b-a0bafb3a7f64" + document_id: "3052422c-49ca-45ac-a918-cadb35d61fd8" responses: - "200": - application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "tool.execution", "name": "image_generation", "arguments": ""}]} "422": application/json: {} - agents_api_v1_conversations_messages: - speakeasy-default-agents-api-v1-conversations-messages: + libraries_share_list_v1: + speakeasy-default-libraries-share-list-v1: parameters: path: - conversation_id: 
"" + library_id: "d2169833-d8e2-416e-a372-76518d3d99c2" responses: "200": - application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + application/json: {"data": [{"library_id": "26c08a7a-d226-4d29-b4d8-c08f0ad41dd1", "org_id": "254e3633-51b9-47a9-bc14-466ecf29d167", "role": "", "share_with_type": "", "share_with_uuid": "815eb88e-1f97-4782-863f-5fd00d37268b"}]} "422": application/json: {} - agents_api_v1_conversations_restart: - speakeasy-default-agents-api-v1-conversations-restart: + libraries_share_create_v1: + speakeasy-default-libraries-share-create-v1: parameters: path: - conversation_id: "" + library_id: "36de3a24-5b1c-4c8f-9d84-d5642205a976" requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} + application/json: {"level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} responses: "200": - application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} - "422": - application/json: {} - agents_api_v1_conversations_start_stream: - speakeasy-default-agents-api-v1-conversations-start-stream: - requestBody: - application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true} - responses: + application/json: {"library_id": "45b3a5b2-8b81-4453-9130-ded7f1e5a366", "org_id": "0fa6e542-f04b-431e-a1be-76a9a92b0e68", "role": "", "share_with_type": "", "share_with_uuid": "cdbcc0c5-e577-4880-8ed3-f919421d4fc5"} "422": application/json: {} - agents_api_v1_conversations_append_stream: - speakeasy-default-agents-api-v1-conversations-append-stream: + libraries_share_delete_v1: + speakeasy-default-libraries-share-delete-v1: parameters: path: - conversation_id: "" + library_id: "709e3cad-9fb2-4f4e-bf88-143cf1808107" requestBody: - application/json: 
{"inputs": "", "stream": true, "store": true, "handoff_execution": "server"} + application/json: {"share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} responses: + "200": + application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} "422": application/json: {} - agents_api_v1_conversations_restart_stream: - speakeasy-default-agents-api-v1-conversations-restart-stream: + audio_api_v1_transcriptions_post: + speakeasy-default-audio-api-v1-transcriptions-post: + requestBody: + multipart/form-data: {"model": "Model X", "stream": false} + responses: + "200": + application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} + userExample: + requestBody: + multipart/form-data: {"model": "voxtral-mini-latest", "stream": false, "diarize": false} + responses: + "200": + application/json: {"model": "voxtral-mini-2507", "text": "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms, in schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better President, and you made me a better man.\nOver the course of these eight years, I've seen the goodness, the resilience, and the hope of the American people. I've seen neighbors looking out for each other as we rescued our economy from the worst crisis of our lifetimes. I've hugged cancer survivors who finally know the security of affordable health care. 
I've seen communities like Joplin rebuild from disaster, and cities like Boston show the world that no terrorist will ever break the American spirit. I've seen the hopeful faces of young graduates and our newest military officers. I've mourned with grieving families searching for answers. And I found grace in a Charleston church. I've seen our scientists help a paralyzed man regain his sense of touch, and our wounded warriors walk again. I've seen our doctors and volunteers rebuild after earthquakes and stop pandemics in their tracks. I've learned from students who are building robots and curing diseases, and who will change the world in ways we can't even imagine. I've seen the youngest of children remind us of our obligations to care for our refugees, to work in peace, and above all, to look out for each other.\nThat's what's possible when we come together in the slow, hard, sometimes frustrating, but always vital work of self-government. But we can't take our democracy for granted. All of us, regardless of party, should throw ourselves into the work of citizenship. Not just when there is an election. Not just when our own narrow interest is at stake. But over the full span of a lifetime. If you're tired of arguing with strangers on the Internet, try to talk with one in real life. If something needs fixing, lace up your shoes and do some organizing. If you're disappointed by your elected officials, then grab a clipboard, get some signatures, and run for office yourself.\nOur success depends on our participation, regardless of which way the pendulum of power swings. It falls on each of us to be guardians of our democracy, to embrace the joyous task we've been given to continually try to improve this great nation of ours. Because for all our outward differences, we all share the same proud title – citizen.\nIt has been the honor of my life to serve you as President. Eight years later, I am even more optimistic about our country's promise. 
And I look forward to working along your side as a citizen for all my days that remain.\nThanks, everybody. God bless you. And God bless the United States of America.\n", "segments": [], "usage": {"prompt_tokens": 4, "completion_tokens": 635, "total_tokens": 3264, "prompt_audio_seconds": 203}, "language": "en"} + audio_api_v1_transcriptions_post_stream: + speakeasy-default-audio-api-v1-transcriptions-post-stream: + requestBody: + multipart/form-data: {"model": "Camry", "stream": true, "diarize": false} + agents_api_v1_conversations_delete: + speakeasy-default-agents-api-v1-conversations-delete: parameters: path: conversation_id: "" - requestBody: - application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} responses: "422": application/json: {} - agents_api_v1_agents_create: - speakeasy-default-agents-api-v1-agents-create: - requestBody: - application/json: {"model": "LeBaron", "name": ""} + agents_api_v1_agents_delete: + speakeasy-default-agents-api-v1-agents-delete: + parameters: + path: + agent_id: "" responses: - "200": - application/json: {"model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z"} "422": application/json: {} - agents_api_v1_agents_list: - speakeasy-default-agents-api-v1-agents-list: + agents_api_v1_agents_list_versions: + speakeasy-default-agents-api-v1-agents-list-versions: parameters: + path: + agent_id: "" query: page: 0 page_size: 20 responses: "200": - application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z"}] + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Prius", "name": "", "object": "agent", "id": "", "version": 
866135, "versions": [849276], "created_at": "2024-07-03T17:01:49.200Z", "updated_at": "2026-06-15T18:44:26.883Z", "deployment_chat": true, "source": ""}] "422": application/json: {} - agents_api_v1_agents_get: - speakeasy-default-agents-api-v1-agents-get: + agents_api_v1_agents_get_version: + speakeasy-default-agents-api-v1-agents-get-version: parameters: path: agent_id: "" + version: "788393" responses: "200": - application/json: {"model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model 3", "name": "", "object": "agent", "id": "", "version": 377706, "versions": [658369, 642981], "created_at": "2024-10-02T23:01:15.980Z", "updated_at": "2026-12-22T00:55:26.568Z", "deployment_chat": false, "source": ""} "422": application/json: {} - agents_api_v1_agents_update: - speakeasy-default-agents-api-v1-agents-update: + agents_api_v1_agents_create_or_update_alias: + speakeasy-default-agents-api-v1-agents-create-or-update-alias: parameters: path: agent_id: "" - requestBody: - application/json: {} + query: + alias: "" + version: 595141 responses: "200": - application/json: {"model": "Model X", "name": "", "object": "agent", "id": "", "version": 799821, "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z"} + application/json: {"alias": "", "version": 768764, "created_at": "2026-12-28T00:40:21.715Z", "updated_at": "2025-09-01T12:54:58.254Z"} "422": application/json: {} - agents_api_v1_agents_update_version: - speakeasy-default-agents-api-v1-agents-update-version: + agents_api_v1_agents_list_version_aliases: + speakeasy-default-agents-api-v1-agents-list-version-aliases: parameters: path: agent_id: "" - query: - version: 157995 responses: "200": - application/json: {"model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, 
"created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z"} + application/json: [{"alias": "", "version": 318290, "created_at": "2025-10-02T20:25:32.322Z", "updated_at": "2026-11-19T02:58:37.894Z"}] "422": application/json: {} - files_api_routes_upload_file: - speakeasy-default-files-api-routes-upload-file: - requestBody: - multipart/form-data: {"file": "x-file: example.file"} - responses: - "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} - files_api_routes_list_files: - speakeasy-default-files-api-routes-list-files: - parameters: - query: - page: 0 - page_size: 100 + ListModels: + userExample: responses: "200": - application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload"}], "object": "", "total": 999335} - files_api_routes_retrieve_file: - speakeasy-default-files-api-routes-retrieve-file: + application/json: {"object": "list"} + RetrieveModel: + speakeasy-default-retrieve-model: parameters: path: - file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "instruct", "source": "repository", "deleted": false} - files_api_routes_delete_file: - speakeasy-default-files-api-routes-delete-file: + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, 
"classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "base"} + "422": + application/json: {} + userExample: parameters: path: - file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} - files_api_routes_download_file: - speakeasy-default-files-api-routes-download-file: + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Principal Implementation Assistant", "root": "", "archived": false} + DeleteModel: + speakeasy-default-delete-model: parameters: path: - file_id: "f8919994-a4a1-46b2-8b5b-06335a4300ce" + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/octet-stream: "x-file: example.file" - files_api_routes_get_signed_url: - speakeasy-default-files-api-routes-get-signed-url: + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + "422": + application/json: {} + userExample: parameters: path: - file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" - query: - expiry: 24 + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"url": "https://knotty-birdcage.net/"} - jobs_api_routes_fine_tuning_get_fine_tuning_jobs: - speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + UpdateModel: + 
speakeasy-default-update-model: parameters: - query: - page: 0 - page_size: 100 - created_by_me: false - responses: - "200": - application/json: {"object": "list", "total": 843585} - jobs_api_routes_fine_tuning_create_fine_tuning_job: - speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" requestBody: - application/json: {"model": "Camaro", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + application/json: {} responses: "200": - application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} - jobs_api_routes_fine_tuning_get_fine_tuning_job: - speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: + application/json: {"id": "", "object": "model", "created": 76471, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "9765ed11-3bc9-49ff-a19d-06665406d404", "model_type": "completion"} + ArchiveModel: + speakeasy-default-archive-model: parameters: path: - job_id: "c167a961-ffca-4bcf-93ac-6169468dd389" + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "babac92a-96fa-48c4-931c-f6f97e1bf24c", "auto_start": false, "model": "Spyder", "status": "FAILED", "created_at": 232438, "modified_at": 32259, "training_files": ["7a95c5a0-399d-4665-84c8-deab766d22dc"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 5651, "loss_function": "single_class"}]} - 
jobs_api_routes_fine_tuning_cancel_fine_tuning_job: - speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: + application/json: {"id": "", "object": "model", "archived": true} + UnarchiveModel: + speakeasy-default-unarchive-model: parameters: path: - job_id: "6188a2f6-7513-4e0f-89cc-3f8088523a49" + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "770b9cc0-1ab6-44de-a816-67010644e9fb", "auto_start": false, "model": "Volt", "status": "CANCELLATION_REQUESTED", "created_at": 546404, "modified_at": 180081, "training_files": ["45e621c6-ac30-4133-b6d1-fc0d1fe24c9f"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [""], "weight": 1298.58, "loss_function": "multi_class"}]} - jobs_api_routes_fine_tuning_start_fine_tuning_job: - speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: - parameters: - path: - job_id: "56553e4d-0679-471e-b9ac-59a77d671103" + application/json: {"id": "", "object": "model", "archived": false} + StartConversation: + speakeasy-default-start-conversation: + requestBody: + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} responses: "200": - application/json: {"id": "68ad461a-676e-47fe-a07e-15e38f5082b5", "auto_start": false, "model": "Grand Cherokee", "status": "STARTED", "created_at": 134515, "modified_at": 192651, "training_files": ["39dabc3d-15eb-49ac-a549-69973f33acee"], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} - jobs_api_routes_batch_get_batch_jobs: - speakeasy-default-jobs-api-routes-batch-get-batch-jobs: + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + 
application/json: {} + ListConversations: + speakeasy-default-list-conversations: parameters: query: page: 0 page_size: 100 - created_by_me: false responses: "200": - application/json: {"object": "list", "total": 186589} - jobs_api_routes_batch_create_batch_job: - speakeasy-default-jobs-api-routes-batch-create-batch-job: + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "object": "conversation", "id": "", "created_at": "2026-05-02T18:35:22.595Z", "updated_at": "2024-04-15T10:58:56.705Z", "model": "Silverado"}] + "422": + application/json: {} + GetConversation: + speakeasy-default-get-conversation: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2026-10-30T16:36:24.274Z", "updated_at": "2026-03-08T22:30:16.213Z", "agent_id": ""} + "422": + application/json: {} + DeleteConversation: + speakeasy-default-delete-conversation: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + AppendConversation: + speakeasy-default-append-conversation: + parameters: + path: + conversation_id: "" requestBody: - application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "timeout_hours": 24} + application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} - jobs_api_routes_batch_get_batch_job: - speakeasy-default-jobs-api-routes-batch-get-batch-job: + application/json: {"object": 
"conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": {"key": "", "key1": ""}}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + GetConversationHistory: + speakeasy-default-get-conversation-history: parameters: path: - job_id: "4017dc9f-b629-42f4-9700-8c681b9e7f0f" + conversation_id: "" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} - jobs_api_routes_batch_cancel_batch_job: - speakeasy-default-jobs-api-routes-batch-cancel-batch-job: + application/json: {"object": "conversation.history", "conversation_id": "", "entries": []} + "422": + application/json: {} + GetConversationMessages: + speakeasy-default-get-conversation-messages: parameters: path: - job_id: "4fb29d1c-535b-4f0a-a1cb-2167f86da569" + conversation_id: "" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} - chat_completion_v1_chat_completions_post: - speakeasy-default-chat-completion-v1-chat-completions-post: + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + application/json: {} + RestartConversation: + speakeasy-default-restart-conversation: + parameters: + path: + conversation_id: "" requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} "422": application/json: {} - stream_chat: - speakeasy-default-stream-chat: + StartConversationStream: + speakeasy-default-start-conversation-stream: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"inputs": "", "stream": true, "completion_args": {"response_format": {"type": "text"}}} responses: "422": application/json: {} - fim_completion_v1_fim_completions_post: - speakeasy-default-fim-completion-v1-fim-completions-post: + AppendConversationStream: + speakeasy-default-append-conversation-stream: + parameters: + path: + conversation_id: "" requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} - stream_fim: - speakeasy-default-stream-fim: + RestartConversationStream: + speakeasy-default-restart-conversation-stream: + parameters: + path: + conversation_id: "" requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "422": application/json: {} - agents_completion_v1_agents_completions_post: - speakeasy-default-agents-completion-v1-agents-completions-post: + CreateAgent: + speakeasy-default-create-agent: requestBody: - application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Mustang", "name": ""} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Taurus", "name": "", "object": "agent", "id": "", "version": 388058, "versions": [980917, 959600], "created_at": "2024-07-23T17:25:11.997Z", "updated_at": "2025-07-14T09:13:03.268Z", "deployment_chat": false, "source": ""} "422": application/json: {} - stream_agents: - speakeasy-default-stream-agents: - requestBody: - application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} + ListAgents: + speakeasy-default-list-agents: + parameters: + query: + page: 0 + page_size: 20 responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Challenger", "name": "", "object": "agent", "id": "", "version": 679172, "versions": [491437], "created_at": "2026-05-11T12:36:32.958Z", "updated_at": "2026-08-23T04:04:31.448Z", "deployment_chat": false, "source": ""}] "422": application/json: {} - embeddings_v1_embeddings_post: - speakeasy-default-embeddings-v1-embeddings-post: - requestBody: - application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + GetAgent: + speakeasy-default-get-agent: + parameters: + path: + agent_id: "" responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", 
"embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "F-150", "name": "", "object": "agent", "id": "", "version": 928666, "versions": [246402], "created_at": "2024-02-28T12:05:26.160Z", "updated_at": "2024-05-16T04:31:56.940Z", "deployment_chat": false, "source": ""} "422": application/json: {} - moderations_v1_moderations_post: - speakeasy-default-moderations-v1-moderations-post: + UpdateAgent: + speakeasy-default-update-agent: + parameters: + path: + agent_id: "" requestBody: - application/json: {"model": "Durango", "input": ["", ""]} + application/json: {"completion_args": {"response_format": {"type": "text"}}} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Corvette", "results": [{}]} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "1", "name": "", "object": "agent", "id": "", "version": 388537, "versions": [955918, 365950, 823288], "created_at": "2026-11-04T08:06:14.896Z", "updated_at": "2025-05-23T04:44:27.181Z", "deployment_chat": true, "source": ""} "422": application/json: {} - chat_moderations_v1_chat_moderations_post: - speakeasy-default-chat-moderations-v1-chat-moderations-post: - requestBody: - application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + DeleteAgent: + speakeasy-default-delete-agent: + parameters: + path: + agent_id: "" + responses: + "422": + application/json: {} + UpdateAgentVersion: + speakeasy-default-update-agent-version: + parameters: + path: + agent_id: "" + query: + version: 958693 responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Explorer", "results": [{}]} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 203502, "versions": [449666], 
"created_at": "2024-09-21T15:29:30.503Z", "updated_at": "2026-10-29T17:49:52.509Z", "deployment_chat": true, "source": ""} "422": application/json: {} - classifications_v1_classifications_post: - speakeasy-default-classifications-v1-classifications-post: - requestBody: - application/json: {"model": "Silverado", "input": [""]} + ListAgentVersions: + speakeasy-default-list-agent-versions: + parameters: + path: + agent_id: "" + query: + page: 0 + page_size: 20 responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "ATS", "results": [{}, {"key": {"scores": {"key": 2080.19}}}]} + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Volt", "name": "", "object": "agent", "id": "", "version": 45747, "versions": [584697, 811109], "created_at": "2024-12-19T10:33:53.873Z", "updated_at": "2025-10-05T12:31:56.977Z", "deployment_chat": false, "source": ""}] "422": application/json: {} - chat_classifications_v1_chat_classifications_post: - speakeasy-default-chat-classifications-v1-chat-classifications-post: - requestBody: - application/json: {"model": "Camry", "input": [{"messages": [{"content": "", "role": "system"}]}]} + GetAgentVersion: + speakeasy-default-get-agent-version: + parameters: + path: + agent_id: "" + version: "" responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Altima", "results": [{}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}]} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Camaro", "name": "", "object": "agent", "id": "", "version": 663020, "versions": [210212], "created_at": "2026-11-16T03:32:55.781Z", "updated_at": "2026-09-28T23:51:49.611Z", "deployment_chat": true, "source": ""} "422": application/json: {} - ocr_v1_ocr_post: - speakeasy-default-ocr-v1-ocr-post: - requestBody: - application/json: {"model": "CX-9", "document": 
{"image_url": {"url": "https://measly-scrap.com"}, "type": "image_url"}} + CreateOrUpdateAgentAlias: + speakeasy-default-create-or-update-agent-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + version: 154719 responses: "200": - application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} + application/json: {"alias": "", "version": 991981, "created_at": "2025-09-02T11:06:53.872Z", "updated_at": "2024-12-17T11:05:04.936Z"} "422": application/json: {} - libraries_list_v1: - speakeasy-default-libraries-list-v1: + ListAgentAliases: + speakeasy-default-list-agent-aliases: + parameters: + path: + agent_id: "" responses: "200": - application/json: {"data": [{"id": "bfc452fd-4bcb-46ec-9f68-ceea101e924d", "name": "", "created_at": "2024-01-31T13:50:47.409Z", "updated_at": "2023-04-09T15:28:24.261Z", "owner_id": "3fb92cf9-0fea-44d0-958f-16963601a1f0", "owner_type": "", "total_size": 811051, "nb_documents": 634577, "chunk_size": 502060}]} - libraries_create_v1: - speakeasy-default-libraries-create-v1: + application/json: [{"alias": "", "version": 345116, "created_at": "2025-03-19T21:46:52.564Z", "updated_at": "2026-07-18T22:23:53.218Z"}] + "422": + application/json: {} + DeleteAgentAlias: + speakeasy-default-delete-agent-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} + ListLibraries: + speakeasy-default-list-libraries: + responses: + "200": + application/json: {"data": []} + CreateLibrary: + speakeasy-default-create-library: requestBody: application/json: {"name": ""} responses: "201": - application/json: {"id": "7285d921-bbab-471e-a2df-600e096d8aca", "name": "", "created_at": "2025-12-10T18:12:15.618Z", "updated_at": "2023-12-29T15:14:03.343Z", "owner_id": "d5e2af8f-c98a-479e-aece-62d79ea6bab3", 
"owner_type": "", "total_size": 866940, "nb_documents": 123652, "chunk_size": 274694} + application/json: {"id": "7a160c5d-b74e-44df-8beb-aca6894fa845", "name": "", "created_at": "2026-05-19T08:13:56.459Z", "updated_at": "2026-06-02T23:02:36.047Z", "owner_id": null, "owner_type": "", "total_size": 236146, "nb_documents": 584286, "chunk_size": 369781} "422": application/json: {} - libraries_get_v1: - speakeasy-default-libraries-get-v1: + GetLibrary: + speakeasy-default-get-library: parameters: path: - library_id: "d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f" + library_id: "44e385d6-783e-4b21-8fae-5181e6817bc4" responses: "200": - application/json: {"id": "24e6ac5e-61cb-4f2c-b0c0-806dfd5d8dbf", "name": "", "created_at": "2023-01-19T09:20:07.756Z", "updated_at": "2023-05-28T00:39:57.656Z", "owner_id": "546a730e-7d06-4324-a4fd-2b7ff127978c", "owner_type": "", "total_size": 191122, "nb_documents": 932135, "chunk_size": null} + application/json: {"id": "785b8f2b-04c4-4e51-aeee-10ba7210996d", "name": "", "created_at": "2026-06-02T21:55:32.209Z", "updated_at": "2026-06-28T12:11:02.341Z", "owner_id": "489e38bd-4195-4ab1-a06d-f1253bcc0e7a", "owner_type": "", "total_size": 733226, "nb_documents": 896348, "chunk_size": 594373} "422": application/json: {} - libraries_delete_v1: - speakeasy-default-libraries-delete-v1: + DeleteLibrary: + speakeasy-default-delete-library: parameters: path: - library_id: "6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a" + library_id: "441ba08a-3d1f-4700-8d6f-f32eeed49dff" responses: "200": - application/json: {"id": "04e83772-3f8e-41d3-a053-763ed9937e07", "name": "", "created_at": "2025-03-15T23:45:26.060Z", "updated_at": "2024-08-03T06:23:12.129Z", "owner_id": "f636aa46-e1d5-4df4-966b-de4af27da6db", "owner_type": "", "total_size": 268102, "nb_documents": 821714, "chunk_size": null} + application/json: {"id": "a03c22a9-d4f2-4735-806c-b8497fe2a882", "name": "", "created_at": "2024-03-20T22:16:14.073Z", "updated_at": "2025-08-10T22:18:39.851Z", "owner_id": null, 
"owner_type": "", "total_size": 735078, "nb_documents": 443485, "chunk_size": 738927} "422": application/json: {} - libraries_update_v1: - speakeasy-default-libraries-update-v1: + UpdateLibrary: + speakeasy-default-update-library: parameters: path: - library_id: "e01880c3-d0b5-4a29-8b1b-abdb8ce917e4" + library_id: "27049553-3425-49ce-b965-fcb3a7ab03a3" requestBody: application/json: {} responses: "200": - application/json: {"id": "c3bb20a7-df8c-4461-8cfb-9e2a978c00da", "name": "", "created_at": "2025-03-12T04:49:28.349Z", "updated_at": "2025-12-18T03:09:25.092Z", "owner_id": "734e66b8-ae70-4069-9ebb-7eb7ee3967d5", "owner_type": "", "total_size": 762363, "nb_documents": 896591, "chunk_size": 507889} + application/json: {"id": "0c44cb97-9c48-4e8b-9837-239b80130faf", "name": "", "created_at": "2025-02-22T01:07:38.404Z", "updated_at": "2024-01-02T09:35:39.994Z", "owner_id": "9ea3bb36-40f8-41f9-ba61-d6f71a725ff2", "owner_type": "", "total_size": 234996, "nb_documents": 664396, "chunk_size": 337104} "422": application/json: {} - libraries_documents_list_v1: - speakeasy-default-libraries-documents-list-v1: + ListDocuments: + speakeasy-default-list-documents: parameters: path: - library_id: "5c3ca4cd-62bc-4c71-ad8a-1531ae80d078" + library_id: "05e1bda5-99b1-4baf-bb03-905d8e094f74" query: page_size: 100 page: 0 @@ -1248,151 +4029,1011 @@ examples: sort_order: "desc" responses: "200": - application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "processing_status": "", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "tokens_processing_total": 957230}]} + application/json: {"pagination": {"total_items": 985775, "total_pages": 196446, 
"current_page": 86746, "page_size": 671573, "has_more": false}, "data": [{"id": "9b168ce6-0e63-4d0a-b784-71cab0b43775", "library_id": "01d6c3ae-df9c-448d-8e84-873b6588d655", "hash": "", "mime_type": "", "extension": "shtml", "size": null, "name": "", "created_at": "2024-06-29T16:51:59.433Z", "processing_status": "", "uploaded_by_id": "ce40c587-9bb9-48d4-8bd3-5ce14f8f07c8", "uploaded_by_type": "", "tokens_processing_total": 288046}]} "422": application/json: {} - libraries_documents_upload_v1: - speakeasy-default-libraries-documents-upload-v1: + UploadDocument: + speakeasy-default-upload-document: parameters: path: - library_id: "a02150d9-5ee0-4877-b62c-28b1fcdf3b76" + library_id: "f973c54e-979a-4464-9d36-8cc31beb21fe" requestBody: multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "processing_status": "", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "tokens_processing_total": 61161} + application/json: {"id": "a13f4191-9721-413d-ac5c-b8edadbfb34e", "library_id": "a6ea3cdd-242f-4132-baf8-9a2589d78cb2", "hash": "", "mime_type": "", "extension": "mp4v", "size": 731796, "name": "", "created_at": "2024-04-30T08:38:55.667Z", "processing_status": "", "uploaded_by_id": "fd1426b3-90f8-4b54-97de-c4f108cb2a63", "uploaded_by_type": "", "tokens_processing_total": 603440} "422": application/json: {} - libraries_documents_get_v1: - speakeasy-default-libraries-documents-get-v1: + GetDocument: + speakeasy-default-get-document: parameters: path: - library_id: "03d908c8-90a1-44fd-bf3a-8490fb7c9a03" - document_id: "90973aec-0508-4375-8b00-91d732414745" + library_id: "f9902d0a-1ea4-4953-be48-52df6edd302a" + document_id: "c3e12fd9-e840-46f2-8d4a-79985ed36d24" responses: "200": - 
application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "processing_status": "", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "tokens_processing_total": 793889} + application/json: {"id": "52c93ba5-b31c-4717-a099-f3415e6d4eea", "library_id": "912f1e36-456c-4551-bd6d-535931a66817", "hash": "", "mime_type": "", "extension": "wav", "size": null, "name": "", "created_at": "2026-09-30T22:43:59.455Z", "processing_status": "", "uploaded_by_id": "8578215b-d0b4-4ee2-857d-dcb0686d45f1", "uploaded_by_type": "", "tokens_processing_total": 833979} "422": application/json: {} - libraries_documents_update_v1: - speakeasy-default-libraries-documents-update-v1: + UpdateDocument: + speakeasy-default-update-document: parameters: path: - library_id: "3ddd8d93-dca5-4a6d-980d-173226c35742" - document_id: "2a25e44c-b160-40ca-b5c2-b65fb2fcae34" + library_id: "3b900c67-d2b6-4637-93f2-3eff2c85f8dd" + document_id: "66f935fd-37ec-441f-bca5-b1129befcbca" requestBody: application/json: {} responses: "200": - application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "processing_status": "", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "tokens_processing_total": 806683} + application/json: {"id": "6a5ec2ab-bd54-4cc8-a761-e51374243293", "library_id": "f8b3b9a7-bb4b-4b47-b3b2-bb6db5e92901", "hash": "", "mime_type": "", "extension": "gif", "size": null, "name": "", "created_at": "2025-11-09T02:41:11.680Z", "processing_status": "", "uploaded_by_id": "0f707dfd-bd39-42ad-9748-c0b305a13eb6", "uploaded_by_type": "", "tokens_processing_total": 170388} "422": application/json: 
{} - libraries_documents_delete_v1: - speakeasy-default-libraries-documents-delete-v1: + DeleteDocument: + speakeasy-default-delete-document: parameters: path: - library_id: "005daae9-d42e-407d-82d7-2261c6a1496c" - document_id: "edc236b0-baff-49a9-884b-4ca36a258da4" + library_id: "c728d742-7845-462b-84ad-2aacbaf1c7cf" + document_id: "ed3f5797-846a-4abe-8e30-39b2fd2323e0" responses: "422": application/json: {} - libraries_documents_get_text_content_v1: - speakeasy-default-libraries-documents-get-text-content-v1: + GetDocumentTextContent: + speakeasy-default-get-document-text-content: parameters: path: - library_id: "1d177215-3b6b-45ba-9fa9-baf773223bec" - document_id: "60214c91-2aba-4692-a4e6-a53365de8caf" + library_id: "12689dc1-50df-4a0d-8202-2757f7a8c141" + document_id: "9d4057e9-d112-437c-911e-6ee031389739" responses: "200": application/json: {"text": ""} "422": application/json: {} - libraries_documents_get_status_v1: - speakeasy-default-libraries-documents-get-status-v1: + GetDocumentStatus: + speakeasy-default-get-document-status: parameters: path: - library_id: "e6906f70-368f-4155-80da-c1718f01bc43" - document_id: "2c904915-d831-4e9d-a345-8ce405bcef66" + library_id: "41bb33c4-7e53-453d-bf21-398bb2862772" + document_id: "416b95cf-19c8-45af-84be-26aaa3ab3666" responses: "200": - application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "processing_status": ""} + application/json: {"document_id": "b5b43c40-8e91-41d9-933c-096ee588639a", "processing_status": ""} "422": application/json: {} - libraries_documents_get_signed_url_v1: - speakeasy-default-libraries-documents-get-signed-url-v1: + GetDocumentSignedUrl: + speakeasy-default-get-document-signed-url: parameters: path: - library_id: "23cf6904-a602-4ee8-9f5b-8efc557c336d" - document_id: "48598486-df71-4994-acbb-1133c72efa8c" + library_id: "2dbbe172-1374-41be-b03d-a088c733612e" + document_id: "b5d88764-47f1-4485-9df1-658775428344" responses: "200": - application/json: 
"https://burdensome-jellyfish.name" + application/json: "" "422": application/json: {} - libraries_documents_get_extracted_text_signed_url_v1: - speakeasy-default-libraries-documents-get-extracted-text-signed-url-v1: + GetDocumentExtractedTextSignedUrl: + speakeasy-default-get-document-extracted-text-signed-url: + parameters: + path: + library_id: "46d040ce-ae2e-4891-a54c-cdab6a8f62d8" + document_id: "3eddbfe2-3fd7-47f5-984b-b378e6950e37" + responses: + "200": + application/json: "" + "422": + application/json: {} + ReprocessDocument: + speakeasy-default-reprocess-document: + parameters: + path: + library_id: "76d357e4-d891-40c6-9d1e-6d6ce5056ee0" + document_id: "09798d2b-8f46-46c6-9765-8054a82a4bb2" + responses: + "422": + application/json: {} + ListLibraryAccesses: + speakeasy-default-list-library-accesses: + parameters: + path: + library_id: "9eb628ef-f118-47eb-b3cc-9750c4ca5fb6" + responses: + "200": + application/json: {"data": [{"library_id": "98821ea0-f6e2-444d-b922-e649cd549a2a", "org_id": "a33230f8-b93d-4f45-80ce-b45e8dd8b5fe", "role": "", "share_with_type": "", "share_with_uuid": "0e1f6eb2-b59e-4e38-b916-382b3383c228"}]} + "422": + application/json: {} + UpdateOrCreateLibraryAccess: + speakeasy-default-update-or-create-library-access: + parameters: + path: + library_id: "88bb030c-1cb5-4231-ba13-742c56554876" + requestBody: + application/json: {"level": "Viewer", "share_with_uuid": "6a736283-c1fa-49b0-9b6d-ea9309c0a766", "share_with_type": "Workspace"} + responses: + "200": + application/json: {"library_id": "b783a30a-ca47-4c15-8095-dee3502846e5", "org_id": "6721ec8e-e0c0-4e8e-be83-3c01f2f884a5", "role": "", "share_with_type": "", "share_with_uuid": null} + "422": + application/json: {} + DeleteLibraryAccess: + speakeasy-default-delete-library-access: + parameters: + path: + library_id: "fc7ab1cf-e33c-4791-a6e0-95ff1f921c43" + requestBody: + application/json: {"share_with_uuid": 
"5818ddff-3568-40f1-a9e4-39d6cb9f5c94", "share_with_type": "Org"} + responses: + "200": + application/json: {"library_id": "6eeb1c0b-8c49-4745-8e3a-eef5bace0782", "org_id": "36550d6e-a514-4601-bd5b-7a0978aab0c7", "role": "", "share_with_type": "", "share_with_uuid": "023a9d84-8615-44a6-acd3-59b113a45c43"} + "422": + application/json: {} + UploadFile: + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} + ListFiles: + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} + RetrieveFile: + userExample: + parameters: + path: + file_id: "654a62d9-b7ee-49ac-835e-af4153e3c9ec" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} + DeleteFile: + userExample: + parameters: + path: + file_id: "789c27a4-69de-47c6-b67f-cf6e56ce9f41" + responses: + "200": + 
application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} + DownloadFile: + speakeasy-default-download-file: + parameters: + path: + file_id: "e2ba278e-eac9-4050-ae8e-ec433e124efb" + responses: + "200": + application/octet-stream: "x-file: example.file" + GetFileSignedUrl: + userExample: + parameters: + path: + file_id: "7a0c108d-9e6b-4c47-990d-a20cba50b283" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} + ListFineTuningJobs: + speakeasy-default-list-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"object": "list", "total": 677316} + CreateFineTuningJob: + speakeasy-default-create-fine-tuning-job: + requestBody: + application/json: {"model": "Countach", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + responses: + "200": + application/json: {"id": "18371b47-e157-4d80-8d09-2687df8868e8", "auto_start": false, "model": "Fiesta", "status": "FAILED", "created_at": 475667, "modified_at": 452225, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + GetFineTuningJob: + speakeasy-default-get-fine-tuning-job: parameters: path: - library_id: "a6f15de3-1e82-4f95-af82-851499042ef8" - document_id: "9749d4f9-24e5-4ca2-99a3-a406863f805d" + job_id: "2855f873-414e-4cf5-a46e-e589e39ee809" responses: "200": - application/json: "https://athletic-disadvantage.info" - "422": - application/json: {} - libraries_documents_reprocess_v1: - speakeasy-default-libraries-documents-reprocess-v1: + application/json: {"id": "b9f4ad32-1400-4751-8e0d-16c09b4b26e6", "auto_start": true, "model": 
"LeBaron", "status": "QUEUED", "created_at": 458966, "modified_at": 377090, "training_files": ["52d812c3-b5fe-4866-878e-39a5910f91df"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 6490.15, "loss_function": "single_class"}]} + CancelFineTuningJob: + speakeasy-default-cancel-fine-tuning-job: parameters: path: - library_id: "51b29371-de8f-4ba4-932b-a0bafb3a7f64" - document_id: "3052422c-49ca-45ac-a918-cadb35d61fd8" + job_id: "ee7d6f03-fcbb-43ca-8f17-0388c0832eb9" responses: - "422": - application/json: {} - libraries_share_list_v1: - speakeasy-default-libraries-share-list-v1: + "200": + application/json: {"id": "24b50383-3de5-4711-a14f-b71bbeccc6c5", "auto_start": true, "model": "Countach", "status": "CANCELLED", "created_at": 148194, "modified_at": 80833, "training_files": ["13ba2c85-5db5-4c14-94e4-2fcf030cecae", "85892e4f-5c84-4f38-bfb8-01072484489c", "723f89f0-65c0-43fa-9a9f-296acfe91134"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": ["", ""], "weight": 1717.09, "loss_function": "single_class"}]} + StartFineTuningJob: + speakeasy-default-start-fine-tuning-job: parameters: path: - library_id: "d2169833-d8e2-416e-a372-76518d3d99c2" + job_id: "da371429-0ec2-4cea-b9c7-73ce3a1dd76f" responses: "200": - application/json: {"data": [{"library_id": "26c08a7a-d226-4d29-b4d8-c08f0ad41dd1", "org_id": "254e3633-51b9-47a9-bc14-466ecf29d167", "role": "", "share_with_type": "", "share_with_uuid": "815eb88e-1f97-4782-863f-5fd00d37268b"}]} - "422": - application/json: {} - libraries_share_create_v1: - speakeasy-default-libraries-share-create-v1: + application/json: {"id": "2628c0c5-a98f-4d0b-a22a-fba0b0b23112", "auto_start": false, "model": "Model 3", "status": "QUEUED", "created_at": 139851, "modified_at": 571341, "training_files": ["856f394d-d216-41ab-8fa1-a42fba9e7734"], "object": "job", 
"job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + ListBatchJobs: + speakeasy-default-list-batch-jobs: parameters: - path: - library_id: "36de3a24-5b1c-4c8f-9d84-d5642205a976" + query: + page: 0 + page_size: 100 + created_by_me: false + order_by: "-created" + responses: + "200": + application/json: {"object": "list", "total": 853018} + CreateBatchJob: + speakeasy-default-create-batch-job: requestBody: - application/json: {"org_id": "aadd9ae1-f285-4437-884a-091c77efa6fd", "level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} + application/json: {"endpoint": "/v1/classifications", "model": "mistral-small-latest", "timeout_hours": 24} responses: "200": - application/json: {"library_id": "45b3a5b2-8b81-4453-9130-ded7f1e5a366", "org_id": "0fa6e542-f04b-431e-a1be-76a9a92b0e68", "role": "", "share_with_type": "", "share_with_uuid": "cdbcc0c5-e577-4880-8ed3-f919421d4fc5"} - "422": - application/json: {} - libraries_share_delete_v1: - speakeasy-default-libraries-share-delete-v1: + application/json: {"id": "", "object": "batch", "input_files": ["936962bc-f885-485f-914e-fe90c1d312f9", "02e65e71-6f9f-4c39-9a54-bba0acb1e912", "4c5d848d-d86e-43cb-a795-1eaba0c96608"], "endpoint": "", "errors": [], "status": "SUCCESS", "created_at": 346291, "total_requests": 784915, "completed_requests": 663597, "succeeded_requests": 195848, "failed_requests": 688098} + GetBatchJob: + speakeasy-default-get-batch-job: parameters: path: - library_id: "709e3cad-9fb2-4f4e-bf88-143cf1808107" - requestBody: - application/json: {"org_id": "0814a235-c2d0-4814-875a-4b85f93d3dc7", "share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} + job_id: "358c80a1-79bd-43f0-8f0e-8186713aa3ba" responses: "200": - application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": 
"618c78f1-41ca-45c3-8ef2-7d78898c7061"} - "422": - application/json: {} - audio_api_v1_transcriptions_post: - speakeasy-default-audio-api-v1-transcriptions-post: - requestBody: - multipart/form-data: {"model": "Model X", "stream": false} + application/json: {"id": "", "object": "batch", "input_files": ["782a7fa0-6ea1-4be9-bce9-9ff61f81530d"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 878725, "total_requests": 913781, "completed_requests": 964506, "succeeded_requests": 119373, "failed_requests": 490093} + CancelBatchJob: + speakeasy-default-cancel-batch-job: + parameters: + path: + job_id: "393537d7-8b33-4931-a289-7f61f8757eda" responses: "200": - application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} - audio_api_v1_transcriptions_post_stream: - speakeasy-default-audio-api-v1-transcriptions-post-stream: - requestBody: - multipart/form-data: {"model": "Camry", "stream": true} + application/json: {"id": "", "object": "batch", "input_files": ["7309e534-200e-43a4-83c5-dc4c2a14c745"], "endpoint": "", "errors": [], "status": "FAILED", "created_at": 157212, "total_requests": 188914, "completed_requests": 685483, "succeeded_requests": 127060, "failed_requests": 428561} + agents_api_v1_agents_delete_alias: + speakeasy-default-agents-api-v1-agents-delete-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} +generatedFiles: + - .gitattributes + - .vscode/settings.json + - USAGE.md + - docs/models/agent.md + - docs/models/agentconversation.md + - docs/models/agentconversationobject.md + - docs/models/agentcreationrequest.md + - docs/models/agentcreationrequesttools.md + - docs/models/agenthandoffdoneevent.md + - docs/models/agenthandoffdoneeventtype.md + - docs/models/agenthandoffentry.md + - docs/models/agenthandoffentryobject.md + - 
docs/models/agenthandoffentrytype.md + - docs/models/agenthandoffstartedevent.md + - docs/models/agenthandoffstartedeventtype.md + - docs/models/agentobject.md + - docs/models/agentsapiv1agentsdeleterequest.md + - docs/models/agentsapiv1agentsgetrequest.md + - docs/models/agentsapiv1agentslistrequest.md + - docs/models/agentsapiv1agentsupdaterequest.md + - docs/models/agentsapiv1agentsupdateversionrequest.md + - docs/models/agentsapiv1conversationsappendrequest.md + - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsdeleterequest.md + - docs/models/agentsapiv1conversationsgetrequest.md + - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md + - docs/models/agentsapiv1conversationshistoryrequest.md + - docs/models/agentsapiv1conversationslistrequest.md + - docs/models/agentsapiv1conversationsmessagesrequest.md + - docs/models/agentsapiv1conversationsrestartrequest.md + - docs/models/agentsapiv1conversationsrestartstreamrequest.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequesttoolchoice.md + - docs/models/agentscompletionstreamrequest.md + - docs/models/agentscompletionstreamrequestmessages.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/agenttools.md + - docs/models/agentupdaterequest.md + - docs/models/agentupdaterequesttools.md + - docs/models/apiendpoint.md + - docs/models/archiveftmodelout.md + - docs/models/archiveftmodeloutobject.md + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/attributes.md + - docs/models/audiochunk.md + - docs/models/audiochunktype.md + - docs/models/audiotranscriptionrequest.md + - docs/models/audiotranscriptionrequeststream.md + - 
docs/models/basemodelcard.md + - docs/models/basemodelcardtype.md + - docs/models/batcherror.md + - docs/models/batchjobin.md + - docs/models/batchjobout.md + - docs/models/batchjoboutobject.md + - docs/models/batchjobsout.md + - docs/models/batchjobsoutobject.md + - docs/models/batchjobstatus.md + - docs/models/builtinconnectors.md + - docs/models/chatclassificationrequest.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequestmessages.md + - docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatmoderationrequest.md + - docs/models/chatmoderationrequestinputs.md + - docs/models/checkpointout.md + - docs/models/classificationrequest.md + - docs/models/classificationrequestinputs.md + - docs/models/classificationresponse.md + - docs/models/classificationtargetresult.md + - docs/models/classifierdetailedjobout.md + - docs/models/classifierdetailedjoboutintegrations.md + - docs/models/classifierdetailedjoboutjobtype.md + - docs/models/classifierdetailedjoboutobject.md + - docs/models/classifierdetailedjoboutstatus.md + - docs/models/classifierftmodelout.md + - docs/models/classifierftmodeloutmodeltype.md + - docs/models/classifierftmodeloutobject.md + - docs/models/classifierjobout.md + - docs/models/classifierjoboutintegrations.md + - docs/models/classifierjoboutjobtype.md + - docs/models/classifierjoboutobject.md + - docs/models/classifierjoboutstatus.md + - docs/models/classifiertargetin.md + - docs/models/classifiertargetout.md + - docs/models/classifiertrainingparameters.md + - docs/models/classifiertrainingparametersin.md + - docs/models/codeinterpretertool.md + - docs/models/codeinterpretertooltype.md + - docs/models/completionargs.md + - docs/models/completionargsstop.md + - 
docs/models/completionchunk.md + - docs/models/completiondetailedjobout.md + - docs/models/completiondetailedjoboutintegrations.md + - docs/models/completiondetailedjoboutjobtype.md + - docs/models/completiondetailedjoboutobject.md + - docs/models/completiondetailedjoboutrepositories.md + - docs/models/completiondetailedjoboutstatus.md + - docs/models/completionevent.md + - docs/models/completionftmodelout.md + - docs/models/completionftmodeloutobject.md + - docs/models/completionjobout.md + - docs/models/completionjoboutobject.md + - docs/models/completionresponsestreamchoice.md + - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/completiontrainingparameters.md + - docs/models/completiontrainingparametersin.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/conversationappendrequest.md + - docs/models/conversationappendrequesthandoffexecution.md + - docs/models/conversationappendstreamrequest.md + - docs/models/conversationappendstreamrequesthandoffexecution.md + - docs/models/conversationevents.md + - docs/models/conversationeventsdata.md + - docs/models/conversationhistory.md + - docs/models/conversationhistoryobject.md + - docs/models/conversationinputs.md + - docs/models/conversationmessages.md + - docs/models/conversationmessagesobject.md + - docs/models/conversationrequest.md + - docs/models/conversationresponse.md + - docs/models/conversationresponseobject.md + - docs/models/conversationrestartrequest.md + - docs/models/conversationrestartrequesthandoffexecution.md + - docs/models/conversationrestartstreamrequest.md + - docs/models/conversationrestartstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequest.md + - docs/models/conversationstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequesttools.md + - docs/models/conversationusageinfo.md + - docs/models/data.md + - docs/models/deletefileout.md + - docs/models/deletemodelout.md + - 
docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/deltamessage.md + - docs/models/document.md + - docs/models/documentlibrarytool.md + - docs/models/documentlibrarytooltype.md + - docs/models/documentout.md + - docs/models/documenttextcontent.md + - docs/models/documentupdatein.md + - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md + - docs/models/embeddingdtype.md + - docs/models/embeddingrequest.md + - docs/models/embeddingrequestinputs.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/encodingformat.md + - docs/models/entitytype.md + - docs/models/entries.md + - docs/models/eventout.md + - docs/models/file.md + - docs/models/filechunk.md + - docs/models/filepurpose.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesdownloadfilerequest.md + - docs/models/filesapiroutesgetsignedurlrequest.md + - docs/models/filesapirouteslistfilesrequest.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md + - docs/models/fileschema.md + - docs/models/filesignedurl.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodeltype.md + - docs/models/finishreason.md + - docs/models/format_.md + - docs/models/ftclassifierlossfunction.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/ftmodelcard.md + - docs/models/ftmodelcardtype.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functioncallentry.md + - docs/models/functioncallentryarguments.md + - docs/models/functioncallentryobject.md + - docs/models/functioncallentrytype.md + - docs/models/functioncallevent.md + - docs/models/functioncalleventtype.md + - docs/models/functionname.md + - 
docs/models/functionresultentry.md + - docs/models/functionresultentryobject.md + - docs/models/functionresultentrytype.md + - docs/models/functiontool.md + - docs/models/functiontooltype.md + - docs/models/githubrepositoryin.md + - docs/models/githubrepositoryintype.md + - docs/models/githubrepositoryout.md + - docs/models/githubrepositoryouttype.md + - docs/models/handoffexecution.md + - docs/models/httpvalidationerror.md + - docs/models/hyperparameters.md + - docs/models/imagegenerationtool.md + - docs/models/imagegenerationtooltype.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/inputentries.md + - docs/models/inputs.md + - docs/models/instructrequest.md + - docs/models/instructrequestinputs.md + - docs/models/instructrequestinputsmessages.md + - docs/models/instructrequestmessages.md + - docs/models/integrations.md + - docs/models/jobin.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md + - docs/models/jobmetadataout.md + - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - 
docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md + - docs/models/jobsout.md + - docs/models/jobsoutdata.md + - docs/models/jobsoutobject.md + - docs/models/jobtype.md + - docs/models/jsonschema.md + - docs/models/legacyjobmetadataout.md + - docs/models/legacyjobmetadataoutobject.md + - docs/models/librariesdeletev1request.md + - docs/models/librariesdocumentsdeletev1request.md + - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md + - docs/models/librariesdocumentsgetsignedurlv1request.md + - docs/models/librariesdocumentsgetstatusv1request.md + - docs/models/librariesdocumentsgettextcontentv1request.md + - docs/models/librariesdocumentsgetv1request.md + - docs/models/librariesdocumentslistv1request.md + - docs/models/librariesdocumentsreprocessv1request.md + - docs/models/librariesdocumentsupdatev1request.md + - docs/models/librariesdocumentsuploadv1documentupload.md + - docs/models/librariesdocumentsuploadv1request.md + - docs/models/librariesgetv1request.md + - docs/models/librariessharecreatev1request.md + - docs/models/librariessharedeletev1request.md + - docs/models/librariessharelistv1request.md + - docs/models/librariesupdatev1request.md + - docs/models/libraryin.md + - docs/models/libraryinupdate.md + - docs/models/libraryout.md + - docs/models/listdocumentout.md + - docs/models/listfilesout.md + - docs/models/listlibraryout.md + - docs/models/listsharingout.md + - docs/models/loc.md + - docs/models/messageentries.md + - docs/models/messageinputcontentchunks.md + - docs/models/messageinputentry.md + - docs/models/messageinputentrycontent.md + - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md + - docs/models/messageoutputcontentchunks.md + - docs/models/messageoutputentry.md + - docs/models/messageoutputentrycontent.md + - docs/models/messageoutputentryobject.md + - docs/models/messageoutputentryrole.md + - 
docs/models/messageoutputentrytype.md + - docs/models/messageoutputevent.md + - docs/models/messageoutputeventcontent.md + - docs/models/messageoutputeventrole.md + - docs/models/messageoutputeventtype.md + - docs/models/messages.md + - docs/models/metricout.md + - docs/models/mistralpromptmode.md + - docs/models/modelcapabilities.md + - docs/models/modelconversation.md + - docs/models/modelconversationobject.md + - docs/models/modelconversationtools.md + - docs/models/modellist.md + - docs/models/modeltype.md + - docs/models/moderationobject.md + - docs/models/moderationresponse.md + - docs/models/name.md + - docs/models/object.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrtableobject.md + - docs/models/ocrusageinfo.md + - docs/models/one.md + - docs/models/outputcontentchunks.md + - docs/models/outputs.md + - docs/models/paginationinfo.md + - docs/models/prediction.md + - docs/models/processingstatusout.md + - docs/models/queryparamstatus.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/repositories.md + - docs/models/requestsource.md + - docs/models/response1.md + - docs/models/responsebody.md + - docs/models/responsedoneevent.md + - docs/models/responsedoneeventtype.md + - docs/models/responseerrorevent.md + - docs/models/responseerroreventtype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/responsestartedevent.md + - docs/models/responsestartedeventtype.md + - docs/models/retrievefileout.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md + - docs/models/role.md + - docs/models/sampletype.md + - docs/models/security.md + - docs/models/shareenum.md + - docs/models/sharingdelete.md + - docs/models/sharingin.md + - docs/models/sharingout.md + - 
docs/models/source.md + - docs/models/ssetypes.md + - docs/models/status.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md + - docs/models/tableformat.md + - docs/models/textchunk.md + - docs/models/textchunktype.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/timestampgranularity.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventname.md + - docs/models/toolexecutiondeltaeventtype.md + - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventname.md + - docs/models/toolexecutiondoneeventtype.md + - docs/models/toolexecutionentry.md + - docs/models/toolexecutionentryobject.md + - docs/models/toolexecutionentrytype.md + - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventname.md + - docs/models/toolexecutionstartedeventtype.md + - docs/models/toolfilechunk.md + - docs/models/toolfilechunktype.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktype.md + - docs/models/tools.md + - docs/models/tooltypes.md + - docs/models/trainingfile.md + - docs/models/transcriptionresponse.md + - docs/models/transcriptionsegmentchunk.md + - docs/models/transcriptionstreamdone.md + - docs/models/transcriptionstreamdonetype.md + - docs/models/transcriptionstreamevents.md + - docs/models/transcriptionstreameventsdata.md + - docs/models/transcriptionstreameventtypes.md + - docs/models/transcriptionstreamlanguage.md + - docs/models/transcriptionstreamlanguagetype.md + - docs/models/transcriptionstreamsegmentdelta.md + - docs/models/transcriptionstreamsegmentdeltatype.md + - 
docs/models/transcriptionstreamtextdelta.md + - docs/models/transcriptionstreamtextdeltatype.md + - docs/models/two.md + - docs/models/type.md + - docs/models/unarchiveftmodelout.md + - docs/models/unarchiveftmodeloutobject.md + - docs/models/updateftmodelin.md + - docs/models/uploadfileout.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - docs/models/wandbintegration.md + - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationouttype.md + - docs/models/wandbintegrationtype.md + - docs/models/websearchpremiumtool.md + - docs/models/websearchpremiumtooltype.md + - docs/models/websearchtool.md + - docs/models/websearchtooltype.md + - docs/sdks/accesses/README.md + - docs/sdks/agents/README.md + - docs/sdks/audio/README.md + - docs/sdks/batch/README.md + - docs/sdks/beta/README.md + - docs/sdks/chat/README.md + - docs/sdks/classifiers/README.md + - docs/sdks/conversations/README.md + - docs/sdks/documents/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/files/README.md + - docs/sdks/fim/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/libraries/README.md + - docs/sdks/mistral/README.md + - docs/sdks/mistralagents/README.md + - docs/sdks/mistraljobs/README.md + - docs/sdks/models/README.md + - docs/sdks/ocr/README.md + - docs/sdks/transcriptions/README.md + - poetry.toml + - py.typed + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai/__init__.py + - src/mistralai/_hooks/__init__.py + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/_version.py + - src/mistralai/accesses.py + - src/mistralai/agents.py + - src/mistralai/audio.py + - src/mistralai/basesdk.py + - src/mistralai/batch.py + - src/mistralai/beta.py + - src/mistralai/chat.py + - src/mistralai/classifiers.py + - 
src/mistralai/conversations.py + - src/mistralai/documents.py + - src/mistralai/embeddings.py + - src/mistralai/files.py + - src/mistralai/fim.py + - src/mistralai/fine_tuning.py + - src/mistralai/httpclient.py + - src/mistralai/jobs.py + - src/mistralai/libraries.py + - src/mistralai/mistral_agents.py + - src/mistralai/mistral_jobs.py + - src/mistralai/models/__init__.py + - src/mistralai/models/agent.py + - src/mistralai/models/agentconversation.py + - src/mistralai/models/agentcreationrequest.py + - src/mistralai/models/agenthandoffdoneevent.py + - src/mistralai/models/agenthandoffentry.py + - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_deleteop.py + - src/mistralai/models/agents_api_v1_agents_getop.py + - src/mistralai/models/agents_api_v1_agents_listop.py + - src/mistralai/models/agents_api_v1_agents_update_versionop.py + - src/mistralai/models/agents_api_v1_agents_updateop.py + - src/mistralai/models/agents_api_v1_conversations_append_streamop.py + - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_deleteop.py + - src/mistralai/models/agents_api_v1_conversations_getop.py + - src/mistralai/models/agents_api_v1_conversations_historyop.py + - src/mistralai/models/agents_api_v1_conversations_listop.py + - src/mistralai/models/agents_api_v1_conversations_messagesop.py + - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py + - src/mistralai/models/agents_api_v1_conversations_restartop.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/agentupdaterequest.py + - src/mistralai/models/apiendpoint.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/audiochunk.py + - src/mistralai/models/audiotranscriptionrequest.py + - src/mistralai/models/audiotranscriptionrequeststream.py + - 
src/mistralai/models/basemodelcard.py + - src/mistralai/models/batcherror.py + - src/mistralai/models/batchjobin.py + - src/mistralai/models/batchjobout.py + - src/mistralai/models/batchjobsout.py + - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/builtinconnectors.py + - src/mistralai/models/chatclassificationrequest.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/chatmoderationrequest.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/classificationrequest.py + - src/mistralai/models/classificationresponse.py + - src/mistralai/models/classificationtargetresult.py + - src/mistralai/models/classifierdetailedjobout.py + - src/mistralai/models/classifierftmodelout.py + - src/mistralai/models/classifierjobout.py + - src/mistralai/models/classifiertargetin.py + - src/mistralai/models/classifiertargetout.py + - src/mistralai/models/classifiertrainingparameters.py + - src/mistralai/models/classifiertrainingparametersin.py + - src/mistralai/models/codeinterpretertool.py + - src/mistralai/models/completionargs.py + - src/mistralai/models/completionargsstop.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completiondetailedjobout.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionftmodelout.py + - src/mistralai/models/completionjobout.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/completiontrainingparameters.py + - src/mistralai/models/completiontrainingparametersin.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/conversationappendrequest.py + - src/mistralai/models/conversationappendstreamrequest.py + - src/mistralai/models/conversationevents.py + - src/mistralai/models/conversationhistory.py + - src/mistralai/models/conversationinputs.py + 
- src/mistralai/models/conversationmessages.py + - src/mistralai/models/conversationrequest.py + - src/mistralai/models/conversationresponse.py + - src/mistralai/models/conversationrestartrequest.py + - src/mistralai/models/conversationrestartstreamrequest.py + - src/mistralai/models/conversationstreamrequest.py + - src/mistralai/models/conversationusageinfo.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/documentlibrarytool.py + - src/mistralai/models/documentout.py + - src/mistralai/models/documenttextcontent.py + - src/mistralai/models/documentupdatein.py + - src/mistralai/models/documenturlchunk.py + - src/mistralai/models/embeddingdtype.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/encodingformat.py + - src/mistralai/models/entitytype.py + - src/mistralai/models/eventout.py + - src/mistralai/models/file.py + - src/mistralai/models/filechunk.py + - src/mistralai/models/filepurpose.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_download_fileop.py + - src/mistralai/models/files_api_routes_get_signed_urlop.py + - src/mistralai/models/files_api_routes_list_filesop.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/filesignedurl.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodeltype.py + - src/mistralai/models/ftclassifierlossfunction.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/ftmodelcard.py + - 
src/mistralai/models/function.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/functioncallentry.py + - src/mistralai/models/functioncallentryarguments.py + - src/mistralai/models/functioncallevent.py + - src/mistralai/models/functionname.py + - src/mistralai/models/functionresultentry.py + - src/mistralai/models/functiontool.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imagegenerationtool.py + - src/mistralai/models/imageurl.py + - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputentries.py + - src/mistralai/models/inputs.py + - src/mistralai/models/instructrequest.py + - src/mistralai/models/jobin.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/jsonschema.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/libraries_delete_v1op.py + - src/mistralai/models/libraries_documents_delete_v1op.py + - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py + - 
src/mistralai/models/libraries_documents_get_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_status_v1op.py + - src/mistralai/models/libraries_documents_get_text_content_v1op.py + - src/mistralai/models/libraries_documents_get_v1op.py + - src/mistralai/models/libraries_documents_list_v1op.py + - src/mistralai/models/libraries_documents_reprocess_v1op.py + - src/mistralai/models/libraries_documents_update_v1op.py + - src/mistralai/models/libraries_documents_upload_v1op.py + - src/mistralai/models/libraries_get_v1op.py + - src/mistralai/models/libraries_share_create_v1op.py + - src/mistralai/models/libraries_share_delete_v1op.py + - src/mistralai/models/libraries_share_list_v1op.py + - src/mistralai/models/libraries_update_v1op.py + - src/mistralai/models/libraryin.py + - src/mistralai/models/libraryinupdate.py + - src/mistralai/models/libraryout.py + - src/mistralai/models/listdocumentout.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/listlibraryout.py + - src/mistralai/models/listsharingout.py + - src/mistralai/models/messageentries.py + - src/mistralai/models/messageinputcontentchunks.py + - src/mistralai/models/messageinputentry.py + - src/mistralai/models/messageoutputcontentchunks.py + - src/mistralai/models/messageoutputentry.py + - src/mistralai/models/messageoutputevent.py + - src/mistralai/models/metricout.py + - src/mistralai/models/mistralerror.py + - src/mistralai/models/mistralpromptmode.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modelconversation.py + - src/mistralai/models/modellist.py + - src/mistralai/models/moderationobject.py + - src/mistralai/models/moderationresponse.py + - src/mistralai/models/no_response_error.py + - src/mistralai/models/ocrimageobject.py + - src/mistralai/models/ocrpagedimensions.py + - src/mistralai/models/ocrpageobject.py + - src/mistralai/models/ocrrequest.py + - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrtableobject.py + - 
src/mistralai/models/ocrusageinfo.py + - src/mistralai/models/outputcontentchunks.py + - src/mistralai/models/paginationinfo.py + - src/mistralai/models/prediction.py + - src/mistralai/models/processingstatusout.py + - src/mistralai/models/referencechunk.py + - src/mistralai/models/requestsource.py + - src/mistralai/models/responsedoneevent.py + - src/mistralai/models/responseerrorevent.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/responseformats.py + - src/mistralai/models/responsestartedevent.py + - src/mistralai/models/responsevalidationerror.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/security.py + - src/mistralai/models/shareenum.py + - src/mistralai/models/sharingdelete.py + - src/mistralai/models/sharingin.py + - src/mistralai/models/sharingout.py + - src/mistralai/models/source.py + - src/mistralai/models/ssetypes.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/systemmessagecontentchunks.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/thinkchunk.py + - src/mistralai/models/timestampgranularity.py + - src/mistralai/models/tool.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/toolchoice.py + - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondeltaevent.py + - src/mistralai/models/toolexecutiondoneevent.py + - src/mistralai/models/toolexecutionentry.py + - src/mistralai/models/toolexecutionstartedevent.py + - src/mistralai/models/toolfilechunk.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/toolreferencechunk.py + - src/mistralai/models/tooltypes.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/transcriptionresponse.py + - src/mistralai/models/transcriptionsegmentchunk.py + - src/mistralai/models/transcriptionstreamdone.py + - 
src/mistralai/models/transcriptionstreamevents.py + - src/mistralai/models/transcriptionstreameventtypes.py + - src/mistralai/models/transcriptionstreamlanguage.py + - src/mistralai/models/transcriptionstreamsegmentdelta.py + - src/mistralai/models/transcriptionstreamtextdelta.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/usageinfo.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/websearchpremiumtool.py + - src/mistralai/models/websearchtool.py + - src/mistralai/models_.py + - src/mistralai/ocr.py + - src/mistralai/py.typed + - src/mistralai/sdk.py + - src/mistralai/sdkconfiguration.py + - src/mistralai/transcriptions.py + - src/mistralai/types/__init__.py + - src/mistralai/types/basemodel.py + - src/mistralai/utils/__init__.py + - src/mistralai/utils/annotations.py + - src/mistralai/utils/datetimes.py + - src/mistralai/utils/enums.py + - src/mistralai/utils/eventstreaming.py + - src/mistralai/utils/forms.py + - src/mistralai/utils/headers.py + - src/mistralai/utils/logger.py + - src/mistralai/utils/metadata.py + - src/mistralai/utils/queryparams.py + - src/mistralai/utils/requestbodies.py + - src/mistralai/utils/retries.py + - src/mistralai/utils/security.py + - src/mistralai/utils/serializers.py + - src/mistralai/utils/unmarshal_json_response.py + - src/mistralai/utils/url.py + - src/mistralai/utils/values.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index ffc6c827..e237388a 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -8,53 +8,83 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: 
false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + versioningStrategy: automatic + persistentEdits: + enabled: "true" tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.3 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} + allowedRedefinedBuiltins: + - id + - object + - input + - dir + asyncMode: both authors: - Mistral baseErrorName: MistralError clientServerStatusCodesAsErrors: true + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. enableCustomCodeRegions: true enumFormat: union envVarPrefix: MISTRAL fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input - maxMethodParams: 15 + license: "" + maxMethodParams: 999 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.client + multipartArrayFormat: standard outputModelSuffix: output + packageManager: uv packageName: mistralai + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat + sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 45143669..b26cdf2b 
100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,64 +1,66 @@ -speakeasyVersion: 1.568.2 +speakeasyVersion: 1.729.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f - sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 + sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 + sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e tags: - latest + - speakeasy-sdk-regen-1772041212 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 + sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 tags: - latest + - speakeasy-sdk-regen-1772041030 mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad - sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 + sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 + sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 tags: - latest - - speakeasy-sdk-regen-1753290410 + - speakeasy-sdk-regen-1772040743 targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f - sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 + sourceRevisionDigest: 
sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 + sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:e242a7fc42e44d2bbc8e5637d4a6455da7fb3d0307dc275ee4c64867f5c4be55 + codeSamplesRevisionDigest: sha256:68866aada6ad13253e32dab06e4876a7aeba4d7759683d81b2ba27f0fb55a342 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 + sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:03b3e82c20d10faa8622f14696632b96b1a2e8d747b266fff345061298d5f3e4 + codeSamplesRevisionDigest: sha256:35f30ba8ce4bd70f58b6abc5222d0bbf82eecc3109b09ca99df4406e363e21a0 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad - sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 + sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 + sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:1fd9897fdd851557c592b8fd46232518359401d15a6574933c43be63ec2edb53 + codeSamplesRevisionDigest: sha256:0bcecf3d1523375a194d6aa13116ffba291da8321e44b01399ae5e24f7ce2e33 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.568.2 + speakeasyVersion: 1.729.0 sources: mistral-azure-source: inputs: - - 
location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 targets: mistralai-azure-sdk: target: python source: mistral-azure-source - output: ./packages/mistralai_azure + output: ./packages/azure publish: pypi: token: $pypi_token @@ -69,7 +71,7 @@ workflow: mistralai-gcp-sdk: target: python source: mistral-google-cloud-source - output: ./packages/mistralai_gcp + output: ./packages/gcp publish: pypi: token: $pypi_token diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index fe32bb3f..65d6d202 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,20 +1,20 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.568.2 +speakeasyVersion: 1.729.0 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 targets: mistralai-azure-sdk: target: python source: mistral-azure-source - output: ./packages/mistralai_azure + output: ./packages/azure 
publish: pypi: token: $pypi_token @@ -25,7 +25,7 @@ targets: mistralai-gcp-sdk: target: python source: mistral-google-cloud-source - output: ./packages/mistralai_gcp + output: ./packages/gcp publish: pypi: token: $pypi_token diff --git a/MIGRATION.md b/MIGRATION.md index 7ccdf9c0..2fc3d13d 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,242 +1,187 @@ +# Migration Guide -# Migration Guide for MistralAI Client from 0.\*.\* to 1.0.0 - -We have made significant changes to the `mistralai` library to improve its usability and consistency. This guide will help you migrate your code from the old client to the new one. - -## Major Changes - -1. **Unified Client Class**: - - The `MistralClient` and `MistralAsyncClient` classes have been consolidated into a single `Mistral` class. - - This simplifies the API by providing a single entry point for both synchronous and asynchronous operations. - -2. **Method Names and Structure**: - - The method names and structure have been updated for better clarity and consistency. 
- - For example: - - `client.chat` is now `client.chat.complete` for non-streaming calls - - `client.chat_stream` is now `client.chat.stream` for streaming calls - - Async `client.chat` is now `client.chat.complete_async` for async non-streaming calls - - Async `client.chat_stream` is now `client.chat.stream_async` for async streaming calls - - -## Method changes - -### Sync - -| Old Methods | New Methods | -| -------------------------- | -------------------------------- | -| `MistralCLient` | `Mistral` | -| `client.chat` | `client.chat.complete` | -| `client.chat_stream` | `client.chat.stream` | -| `client.completions` | `client.fim.complete` | -| `client.completions_stream`| `client.fim.stream` | -| `client.embeddings` | `client.embeddings.create` | -| `client.list_models` | `client.models.list` | -| `client.delete_model` | `client.models.delete` | -| `client.files.create` | `client.files.upload` | -| `client.files.list` | `client.files.list` | -| `client.files.retrieve` | `client.files.retrieve` | -| `client.files.delete` | `client.files.delete` | -| `client.jobs.create` | `client.fine_tuning.jobs.create` | -| `client.jobs.list` | `client.fine_tuning.jobs.list` | -| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | -| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | - -### Async - -| Old Methods | New Methods | -| -------------------------------- | -------------------------------------- | -| `MistralAsyncClient` | `Mistral` | -| `async_client.chat` | `client.chat.complete_async` | -| `async_client.chat_stream` | `client.chat.stream_async` | -| `async_client.completions` | `client.fim.complete_async` | -| `async_client.completions_stream`| `client.fim.stream_async` | -| `async_client.embeddings` | `client.embeddings.create_async` | -| `async_client.list_models` | `client.models.list_async` | -| `async_client.delete_model` | `client.models.delete_async` | -| `async_client.files.create` | `client.files.upload_async` | -| `async_client.files.list` | 
`client.files.list_async` | -| `async_client.files.retrieve` | `client.files.retrieve_async` | -| `async_client.files.delete` | `client.files.delete_async` | -| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | -| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | -| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | -| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | - -### Message Changes - -The `ChatMessage` class has been replaced with a more flexible system. You can now use the `SystemMessage`, `UserMessage`, `AssistantMessage`, and `ToolMessage` classes to create messages. - -The return object of the stream call methods have been modified to `chunk.data.choices[0].delta.content` from `chunk.choices[0].delta.content`. - -## Example Migrations - -### Example 1: Non-Streaming Chat - -**Old:** -```python -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +This guide covers migrating between major versions of the Mistral Python SDK. -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" +--- -client = MistralClient(api_key=api_key) +## Migrating from v1.x to v2.x -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] +Version 2.0 updates the import paths from `mistralai` to `mistralai.client`. 
-# No streaming -chat_response = client.chat( - model=model, - messages=messages, -) +### Import Changes -print(chat_response.choices[0].message.content) +All imports move from `mistralai` to `mistralai.client`: + +```python +# v1 +from mistralai import Mistral +from mistralai.models import UserMessage, AssistantMessage +from mistralai.types import BaseModel + +# v2 +from mistralai.client import Mistral +from mistralai.client.models import UserMessage, AssistantMessage +from mistralai.client.types import BaseModel ``` -**New:** +### Quick Reference -```python -import os +| v1 | v2 | +|---|---| +| `from mistralai import Mistral` | `from mistralai.client import Mistral` | +| `from mistralai.models import ...` | `from mistralai.client.models import ...` | +| `from mistralai.types import ...` | `from mistralai.client.types import ...` | +| `from mistralai.utils import ...` | `from mistralai.client.utils import ...` | -from mistralai import Mistral, UserMessage +### Azure & GCP Import Changes -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" +Azure and GCP SDKs now live under the `mistralai` namespace as separate distributions: -client = Mistral(api_key=api_key) +| v1 | v2 | +|---|---| +| `from mistralai_azure import MistralAzure` | `from mistralai.azure.client import MistralAzure` | +| `from mistralai_azure.models import ...` | `from mistralai.azure.client.models import ...` | +| `from mistralai_gcp import MistralGoogleCloud` | `from mistralai.gcp.client import MistralGCP` | +| `from mistralai_gcp.models import ...` | `from mistralai.gcp.client.models import ...` | -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] -# Or using the new message classes -# messages = [ -# UserMessage(content="What is the best French cheese?"), -# ] - -chat_response = client.chat.complete( - model=model, - messages=messages, -) - -print(chat_response.choices[0].message.content) -``` +#### Installation Changes + +For GCP 
authentication dependencies, use `pip install "mistralai[gcp]"`. + +### What Stays the Same + +- The `Mistral` client API is unchanged +- All models (`UserMessage`, `AssistantMessage`, etc.) work the same way + +### Enums -### Example 2: Streaming Chat +Enums now accept unknown values for forward compatibility with API changes. -**Old:** +--- +## Migrating from v0.x to v1.x + +Version 1.0 introduced significant changes to improve usability and consistency. + +> **Note:** The v1.x examples below use v1-style imports (e.g., `from mistralai import Mistral`). If you're on v2.x, combine these API changes with the [v1 to v2 import changes](#migrating-from-v1x-to-v2x) above. + +### Major Changes + +1. **Unified Client Class**: `MistralClient` and `MistralAsyncClient` consolidated into a single `Mistral` class +2. **Method Structure**: Methods reorganized into resource-based groups (e.g., `client.chat.complete()`) +3. **Message Classes**: `ChatMessage` replaced with typed classes (`UserMessage`, `AssistantMessage`, etc.) +4. 
**Streaming Response**: Stream chunks now accessed via `chunk.data.choices[0].delta.content` + +### Method Mapping + +#### Sync Methods + +| v0.x | v1.x | +|---|---| +| `MistralClient` | `Mistral` | +| `client.chat` | `client.chat.complete` | +| `client.chat_stream` | `client.chat.stream` | +| `client.completions` | `client.fim.complete` | +| `client.completions_stream` | `client.fim.stream` | +| `client.embeddings` | `client.embeddings.create` | +| `client.list_models` | `client.models.list` | +| `client.delete_model` | `client.models.delete` | +| `client.files.create` | `client.files.upload` | +| `client.jobs.create` | `client.fine_tuning.jobs.create` | +| `client.jobs.list` | `client.fine_tuning.jobs.list` | +| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | +| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | + +#### Async Methods + +| v0.x | v1.x | +|---|---| +| `MistralAsyncClient` | `Mistral` | +| `async_client.chat` | `client.chat.complete_async` | +| `async_client.chat_stream` | `client.chat.stream_async` | +| `async_client.completions` | `client.fim.complete_async` | +| `async_client.completions_stream` | `client.fim.stream_async` | +| `async_client.embeddings` | `client.embeddings.create_async` | +| `async_client.list_models` | `client.models.list_async` | +| `async_client.files.create` | `client.files.upload_async` | +| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | +| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | +| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | +| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | + +### Example: Non-Streaming Chat + +**v0.x:** ```python from mistralai.client import MistralClient from mistralai.models.chat_completion import ChatMessage -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - client = MistralClient(api_key=api_key) -messages = [ - ChatMessage(role="user", content="What is the best 
French cheese?") -] - -# With streaming -stream_response = client.chat_stream(model=model, messages=messages) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] +response = client.chat(model="mistral-large-latest", messages=messages) -for chunk in stream_response: - print(chunk.choices[0].delta.content) +print(response.choices[0].message.content) ``` -**New:** -```python -import os +**v1.x:** +```python from mistralai import Mistral, UserMessage -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - client = Mistral(api_key=api_key) -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] -# Or using the new message classes -# messages = [ -# UserMessage(content="What is the best French cheese?"), -# ] - -stream_response = client.chat.stream( - model=model, - messages=messages, -) - -for chunk in stream_response: - print(chunk.data.choices[0].delta.content) +messages = [UserMessage(content="What is the best French cheese?")] +response = client.chat.complete(model="mistral-large-latest", messages=messages) +print(response.choices[0].message.content) ``` -### Example 3: Async +### Example: Streaming Chat -**Old:** +**v0.x:** ```python -from mistralai.async_client import MistralAsyncClient +from mistralai.client import MistralClient from mistralai.models.chat_completion import ChatMessage -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" +client = MistralClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] -client = MistralAsyncClient(api_key=api_key) +for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] -messages = [ - ChatMessage(role="user", content="What is the 
best French cheese?") -] +for chunk in client.chat.stream(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) # Note: chunk.data +``` -# With async -async_response = client.chat_stream(model=model, messages=messages) +### Example: Async Streaming -async for chunk in async_response: +**v0.x:** +```python +from mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralAsyncClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +async for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): print(chunk.choices[0].delta.content) ``` -**New:** +**v1.x:** ```python -import asyncio -import os - from mistralai import Mistral, UserMessage +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] -async def main(): - client = Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - - messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, - ] - # Or using the new message classes - # messages = [ - # UserMessage( - # content="What is the best French cheese?", - # ), - # ] - async_response = await client.chat.stream_async( - messages=messages, - model="mistral-large-latest", - ) - - async for chunk in async_response: - print(chunk.data.choices[0].delta.content) - - -asyncio.run(main()) +async for chunk in await client.chat.stream_async(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) ``` diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..bba024ad --- /dev/null +++ b/Makefile @@ -0,0 +1,34 @@ +.PHONY: help generate test-generate update-speakeasy-version check-config + +help: + @echo "Available targets:" + @echo " make generate Generate all SDKs (main, Azure, GCP)" + @echo " make test-generate Test SDK generation locally" + @echo " make 
update-speakeasy-version VERSION=x.y.z Update Speakeasy CLI version" + @echo " make check-config Check gen.yaml against recommended defaults" + @echo "" + @echo "Note: Production SDK generation is done via GitHub Actions:" + @echo " .github/workflows/sdk_generation_mistralai_sdk.yaml" + +# Generate all SDKs (main, Azure, GCP) +generate: + speakeasy run -t all + +# Test SDK generation locally. +# For production, use GitHub Actions: .github/workflows/sdk_generation_mistralai_sdk.yaml +# This uses the Speakeasy CLI version defined in .speakeasy/workflow.yaml +test-generate: + speakeasy run --skip-versioning + +# Check gen.yaml configuration against Speakeasy recommended defaults +check-config: + speakeasy configure generation check + +# Update the Speakeasy CLI version (the code generator tool). +# This modifies speakeasyVersion in .speakeasy/workflow.yaml and regenerates the SDK. +# Usage: make update-speakeasy-version VERSION=1.685.0 +update-speakeasy-version: +ifndef VERSION + $(error VERSION is required. This is the Speakeasy CLI version (e.g., 1.685.0)) +endif + uv run inv update-speakeasy --version "$(VERSION)" --targets "all" diff --git a/README.md b/README.md index f71ccfcb..dd98b5cc 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,13 @@ # Mistral Python Client +> [!IMPORTANT] +> **Looking for v1 documentation?** If you installed `mistralai` from PyPI (e.g., `pip install mistralai`), you are using **v1** of the SDK. The documentation on this branch (`main`) is for **v2**, which is not yet released on PyPI. +> +> **➡️ [Go to the v1 branch for v1 documentation](https://github.com/mistralai/client-python/tree/v1)** + ## Migration warning - -This documentation is for Mistral AI SDK v1. You can find more details on how to migrate from v0 to v1 [here](MIGRATION.md) + +This documentation is for Mistral AI SDK v2. 
You can find more details on how to migrate from v1 to v2 [here](MIGRATION.md) ## API Key Setup @@ -58,7 +63,15 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo > > Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. -The SDK can be installed with either *pip* or *poetry* package managers. +The SDK can be installed with *uv*, *pip*, or *poetry* package managers. + +### uv + +*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. + +```bash +uv add mistralai +``` ### PIP @@ -89,13 +102,13 @@ It's also possible to write a standalone Python script without needing to set up ```python #!/usr/bin/env -S uv run --script # /// script -# requires-python = ">=3.9" +# requires-python = ">=3.10" # dependencies = [ # "mistralai", # ] # /// -from mistralai import Mistral +from mistralai.client import Mistral sdk = Mistral( # SDK arguments @@ -110,15 +123,22 @@ Once that is saved to a file, you can run it with `uv run script.py` where ### Agents extra dependencies -When using the agents related feature it is required to add the `agents` extra dependencies. This can be added when +When using the agents related feature it is required to add the `agents` extra dependencies. This can be added when installing the package: ```bash pip install "mistralai[agents]" ``` -> Note: Because of some of our dependencies, these features are only available for python version higher or equal to -> 3.10. +> Note: These features require Python 3.10+ (the SDK minimum). + +### Additional packages + +Additional `mistralai-*` packages (e.g. 
`mistralai-workflows`) can be installed separately and are available under the `mistralai` namespace: + +```bash +pip install mistralai-workflows +``` ## SDK Example Usage @@ -129,7 +149,7 @@ This example shows how to create chat completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -137,12 +157,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -150,11 +172,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -163,12 +186,14 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -182,7 +207,7 @@ This example shows how to upload a file. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -201,11 +226,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -231,7 +257,7 @@ This example shows how to create agents completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -241,10 +267,12 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -252,11 +280,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -267,10 +296,12 @@ async def main(): res = await mistral.agents.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -284,7 +315,7 @@ This example shows how to create embedding request. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -303,11 +334,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -331,7 +363,7 @@ asyncio.run(main()) ### More examples -You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. +You can run the examples in the `examples/` directory using `uv run`. ## Providers' SDKs Example Usage @@ -340,38 +372,41 @@ You can run the examples in the `examples/` directory using `poetry run` or by e **Prerequisites** -Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). +**Step 1: Install** + +```bash +pip install mistralai +``` + +**Step 2: Example Usage** + Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/azure). 
```python -import asyncio import os +from mistralai.azure.client import MistralAzure -from mistralai_azure import MistralAzure - +# The SDK automatically injects api-version as a query parameter client = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version="2024-05-01-preview", # Optional, this is the default ) -async def main() -> None: - res = await client.chat.complete_async( - max_tokens= 100, - temperature= 0.5, - messages= [ - { - "content": "Hello there!", - "role": "user" - } - ] - ) - print(res) - -asyncio.run(main()) +res = client.chat.complete( + model=os.environ["AZURE_MODEL"], + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -The documentation for the Azure SDK is available [here](packages/mistralai_azure/README.md). ### Google Cloud @@ -388,40 +423,43 @@ gcloud auth application-default login **Step 1: Install** -Install the extras dependencies specific to Google Cloud: - ```bash -pip install mistralai[gcp] +pip install mistralai +# For GCP authentication support (required): +pip install "mistralai[gcp]" ``` **Step 2: Example Usage** -Here's a basic example to get you started. +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/gcp). 
-```python -import asyncio -from mistralai_gcp import MistralGoogleCloud - -client = MistralGoogleCloud() +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` +```python +import os +from mistralai.gcp.client import MistralGCP -async def main() -> None: - res = await client.chat.complete_async( - model= "mistral-small-2402", - messages= [ - { - "content": "Hello there!", - "role": "user" - } - ] - ) - print(res) +# The SDK auto-detects credentials and builds the Vertex AI URL +client = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region="us-central1", # Default: europe-west4 +) -asyncio.run(main()) +res = client.chat.complete( + model="mistral-small-2503", + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -The documentation for the GCP SDK is available [here](packages/mistralai_gcp/README.md). - ## Available Resources and Operations @@ -429,45 +467,43 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA
Available methods -### [agents](docs/sdks/agents/README.md) +### [Agents](docs/sdks/agents/README.md) * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion -### [audio](docs/sdks/audio/README.md) - - -#### [audio.transcriptions](docs/sdks/transcriptions/README.md) +### [Audio.Transcriptions](docs/sdks/transcriptions/README.md) * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription -* [stream](docs/sdks/transcriptions/README.md#stream) - Create streaming transcription (SSE) - -### [batch](docs/sdks/batch/README.md) - - -#### [batch.jobs](docs/sdks/mistraljobs/README.md) +* [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) -* [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs -* [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job -* [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job -* [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job +### [Batch.Jobs](docs/sdks/batchjobs/README.md) -### [beta](docs/sdks/beta/README.md) +* [list](docs/sdks/batchjobs/README.md#list) - Get Batch Jobs +* [create](docs/sdks/batchjobs/README.md#create) - Create Batch Job +* [get](docs/sdks/batchjobs/README.md#get) - Get Batch Job +* [cancel](docs/sdks/batchjobs/README.md#cancel) - Cancel Batch Job +### [Beta.Agents](docs/sdks/betaagents/README.md) -#### [beta.agents](docs/sdks/mistralagents/README.md) +* [create](docs/sdks/betaagents/README.md#create) - Create an agent that can be used within a conversation. +* [list](docs/sdks/betaagents/README.md#list) - List agent entities. +* [get](docs/sdks/betaagents/README.md#get) - Retrieve an agent entity. +* [update](docs/sdks/betaagents/README.md#update) - Update an agent entity. +* [delete](docs/sdks/betaagents/README.md#delete) - Delete an agent entity.
+* [update_version](docs/sdks/betaagents/README.md#update_version) - Update an agent version. +* [list_versions](docs/sdks/betaagents/README.md#list_versions) - List all versions of an agent. +* [get_version](docs/sdks/betaagents/README.md#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](docs/sdks/betaagents/README.md#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](docs/sdks/betaagents/README.md#delete_version_alias) - Delete an agent version alias. -* [create](docs/sdks/mistralagents/README.md#create) - Create a agent that can be used within a conversation. -* [list](docs/sdks/mistralagents/README.md#list) - List agent entities. -* [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. -* [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. -* [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. - -#### [beta.conversations](docs/sdks/conversations/README.md) +### [Beta.Conversations](docs/sdks/conversations/README.md) * [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. * [list](docs/sdks/conversations/README.md#list) - List all created conversations. * [get](docs/sdks/conversations/README.md#get) - Retrieve a conversation information. +* [delete](docs/sdks/conversations/README.md#delete) - Delete a conversation. * [append](docs/sdks/conversations/README.md#append) - Append new entries to an existing conversation. * [get_history](docs/sdks/conversations/README.md#get_history) - Retrieve all entries in a conversation. * [get_messages](docs/sdks/conversations/README.md#get_messages) - Retrieve all messages in a conversation. 
@@ -476,7 +512,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. * [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. -#### [beta.libraries](docs/sdks/libraries/README.md) +### [Beta.Libraries](docs/sdks/libraries/README.md) * [list](docs/sdks/libraries/README.md#list) - List all libraries you have access to. * [create](docs/sdks/libraries/README.md#create) - Create a new Library. @@ -484,15 +520,15 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [delete](docs/sdks/libraries/README.md#delete) - Delete a library and all of it's document. * [update](docs/sdks/libraries/README.md#update) - Update a library. -#### [beta.libraries.accesses](docs/sdks/accesses/README.md) +#### [Beta.Libraries.Accesses](docs/sdks/accesses/README.md) * [list](docs/sdks/accesses/README.md#list) - List all of the access to this library. * [update_or_create](docs/sdks/accesses/README.md#update_or_create) - Create or update an access level. * [delete](docs/sdks/accesses/README.md#delete) - Delete an access level. -#### [beta.libraries.documents](docs/sdks/documents/README.md) +#### [Beta.Libraries.Documents](docs/sdks/documents/README.md) -* [list](docs/sdks/documents/README.md#list) - List document in a given library. +* [list](docs/sdks/documents/README.md#list) - List documents in a given library. * [upload](docs/sdks/documents/README.md#upload) - Upload a new document. * [get](docs/sdks/documents/README.md#get) - Retrieve the metadata of a specific document. * [update](docs/sdks/documents/README.md#update) - Update the metadata of a specific document. 
@@ -503,23 +539,23 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [extracted_text_signed_url](docs/sdks/documents/README.md#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. * [reprocess](docs/sdks/documents/README.md#reprocess) - Reprocess a document. -### [chat](docs/sdks/chat/README.md) +### [Chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -### [classifiers](docs/sdks/classifiers/README.md) +### [Classifiers](docs/sdks/classifiers/README.md) * [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations * [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations * [classify](docs/sdks/classifiers/README.md#classify) - Classifications * [classify_chat](docs/sdks/classifiers/README.md#classify_chat) - Chat Classifications -### [embeddings](docs/sdks/embeddings/README.md) +### [Embeddings](docs/sdks/embeddings/README.md) * [create](docs/sdks/embeddings/README.md#create) - Embeddings -### [files](docs/sdks/files/README.md) +### [Files](docs/sdks/files/README.md) * [upload](docs/sdks/files/README.md#upload) - Upload File * [list](docs/sdks/files/README.md#list) - List Files @@ -528,24 +564,20 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [download](docs/sdks/files/README.md#download) - Download File * [get_signed_url](docs/sdks/files/README.md#get_signed_url) - Get Signed Url -### [fim](docs/sdks/fim/README.md) +### [Fim](docs/sdks/fim/README.md) * [complete](docs/sdks/fim/README.md#complete) - Fim Completion * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -### [fine_tuning](docs/sdks/finetuning/README.md) - - -#### [fine_tuning.jobs](docs/sdks/jobs/README.md) +### [FineTuning.Jobs](docs/sdks/finetuningjobs/README.md) -* [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs 
-* [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job -* [get](docs/sdks/jobs/README.md#get) - Get Fine Tuning Job -* [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job -* [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job +* [list](docs/sdks/finetuningjobs/README.md#list) - Get Fine Tuning Jobs +* [create](docs/sdks/finetuningjobs/README.md#create) - Create Fine Tuning Job +* [get](docs/sdks/finetuningjobs/README.md#get) - Get Fine Tuning Job +* [cancel](docs/sdks/finetuningjobs/README.md#cancel) - Cancel Fine Tuning Job +* [start](docs/sdks/finetuningjobs/README.md#start) - Start Fine Tuning Job - -### [models](docs/sdks/models/README.md) +### [Models](docs/sdks/models/README.md) * [list](docs/sdks/models/README.md#list) - List Models * [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model @@ -554,7 +586,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model * [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model -### [ocr](docs/sdks/ocr/README.md) +### [Ocr](docs/sdks/ocr/README.md) * [process](docs/sdks/ocr/README.md#process) - OCR @@ -574,7 +606,7 @@ The stream is also a [Context Manager][context-manager] and can be used with the underlying connection when the context is exited. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -589,7 +621,11 @@ with Mistral( "tool_call_id": "", "result": "", }, - ], stream=True) + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -614,7 +650,7 @@ Certain SDK methods accept file objects as part of a request body or multi-part > ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -640,8 +676,8 @@ Some of the endpoints in this SDK support retries. 
If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistralai import Mistral -from mistralai.utils import BackoffStrategy, RetryConfig +from mistralai.client import Mistral +from mistralai.client.utils import BackoffStrategy, RetryConfig import os @@ -659,8 +695,8 @@ with Mistral( If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistralai import Mistral -from mistralai.utils import BackoffStrategy, RetryConfig +from mistralai.client import Mistral +from mistralai.client.utils import BackoffStrategy, RetryConfig import os @@ -680,28 +716,20 @@ with Mistral( ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an exception. +[`MistralError`](./src/mistralai/client/errors/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: -By default, an API error will raise a models.SDKError exception, which has the following properties: - -| Property | Type | Description | -|-----------------|------------------|-----------------------| -| `.status_code` | *int* | The HTTP status code | -| `.message` | *str* | The error message | -| `.raw_response` | *httpx.Response* | The raw HTTP response | -| `.body` | *str* | The response content | - -When custom error responses are specified for an operation, the SDK may also raise their associated exceptions. You can refer to respective *Errors* tables in SDK docs for more details on possible exception types for each operation. 
For example, the `list_async` method may raise the following exceptions: - -| Error Type | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| Property | Type | Description | +| ------------------ | ---------------- | --------------------------------------------------------------------------------------- | +| `err.message` | `str` | Error message | +| `err.status_code` | `int` | HTTP response status code eg `404` | +| `err.headers` | `httpx.Headers` | HTTP response headers | +| `err.body` | `str` | HTTP body. Can be empty string if no body is returned. | +| `err.raw_response` | `httpx.Response` | Raw HTTP response | +| `err.data` | | Optional. Some errors may contain structured data. [See Error Classes](#error-classes). | ### Example - ```python -from mistralai import Mistral, models +from mistralai.client import Mistral, errors import os @@ -711,18 +739,46 @@ with Mistral( res = None try: - res = mistral.models.list() + res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") # Handle response print(res) - except models.HTTPValidationError as e: - # handle e.data: models.HTTPValidationErrorData - raise(e) - except models.SDKError as e: - # handle exception - raise(e) + + except errors.MistralError as e: + # The base class for HTTP error responses + print(e.message) + print(e.status_code) + print(e.body) + print(e.headers) + print(e.raw_response) + + # Depending on the method different errors may be thrown + if isinstance(e, errors.HTTPValidationError): + print(e.data.detail) # Optional[List[models.ValidationError]] ``` + +### Error Classes +**Primary error:** +* [`MistralError`](./src/mistralai/client/errors/mistralerror.py): The base class for HTTP error responses. + +
Less common errors (6) + +
+ +**Network errors:** +* [`httpx.RequestError`](https://www.python-httpx.org/exceptions/#httpx.RequestError): Base class for request errors. + * [`httpx.ConnectError`](https://www.python-httpx.org/exceptions/#httpx.ConnectError): HTTP client was unable to make a request to a server. + * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. + + +**Inherit from [`MistralError`](./src/mistralai/client/errors/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* +* [`ResponseValidationError`](./src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. + +
+ +\* Check [the method documentation](#available-resources-and-operations) to see if the error is applicable. @@ -739,7 +795,7 @@ You can override the default server globally by passing a server name to the `se #### Example ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -759,7 +815,7 @@ with Mistral( The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -785,7 +841,7 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai import Mistral +from mistralai.client import Mistral import httpx http_client = httpx.Client(headers={"x-custom-header": "someValue"}) @@ -794,8 +850,8 @@ s = Mistral(client=http_client) or you could wrap the client with your own custom logic: ```python -from mistralai import Mistral -from mistralai.httpclient import AsyncHttpClient +from mistralai.client import Mistral +from mistralai.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -870,7 +926,7 @@ This SDK supports the following security scheme globally: To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. 
For example: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -894,7 +950,7 @@ The `Mistral` class implements the context manager protocol and registers a fina [context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers ```python -from mistralai import Mistral +from mistralai.client import Mistral import os def main(): @@ -921,11 +977,11 @@ You can setup your SDK to emit debug logs for SDK requests and responses. You can pass your own logger class directly into your SDK. ```python -from mistralai import Mistral +from mistralai.client import Mistral import logging logging.basicConfig(level=logging.DEBUG) -s = Mistral(debug_logger=logging.getLogger("mistralai")) +s = Mistral(debug_logger=logging.getLogger("mistralai.client")) ``` You can also enable a default debug logger by setting an environment variable `MISTRAL_DEBUG` to true. @@ -948,4 +1004,4 @@ Generally, the SDK will work well with most IDEs out of the box. However, when u ## Contributions While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. \ No newline at end of file +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/RELEASES.md b/RELEASES.md index 2089bb04..48b65760 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -268,4 +268,114 @@ Based on: ### Generated - [python v1.9.3] . ### Releases -- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . 
\ No newline at end of file +- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . + +## 2025-08-13 07:21:11 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.6] . +### Releases +- [PyPI v1.9.6] https://pypi.org/project/mistralai/1.9.6 - . + +## 2025-08-20 08:28:00 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.7] . +### Releases +- [PyPI v1.9.7] https://pypi.org/project/mistralai/1.9.7 - . + +## 2025-08-25 14:54:06 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.8] . +### Releases +- [PyPI v1.9.8] https://pypi.org/project/mistralai/1.9.8 - . + +## 2025-08-26 17:34:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.9] . +### Releases +- [PyPI v1.9.9] https://pypi.org/project/mistralai/1.9.9 - . + +## 2025-09-02 07:02:26 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.10] . +### Releases +- [PyPI v1.9.10] https://pypi.org/project/mistralai/1.9.10 - . 
+ +## 2025-10-02 15:48:02 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.11] . +### Releases +- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . + +## 2025-12-16 19:44:09 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.0] . +### Releases +- [PyPI v1.10.0] https://pypi.org/project/mistralai/1.10.0 - . + +## 2026-01-15 18:39:22 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.1] . +### Releases +- [PyPI v1.10.1] https://pypi.org/project/mistralai/1.10.1 - . + +## 2026-01-22 11:16:25 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.11.1] . +### Releases +- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . + +## 2026-02-01 21:20:42 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.12.0] . +### Releases +- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . + +## 2026-02-25 17:32:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] . 
+### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index d2bba404..f71bbabc 100644 --- a/USAGE.md +++ b/USAGE.md @@ -5,7 +5,7 @@ This example shows how to create chat completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -13,12 +13,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -26,11 +28,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -39,12 +42,14 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -58,7 +63,7 @@ This example shows how to upload a file. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -77,11 +82,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -107,7 +113,7 @@ This example shows how to create agents completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -117,10 +123,12 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -128,11 +136,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -143,10 +152,12 @@ async def main(): res = await mistral.agents.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -160,7 +171,7 @@ This example shows how to create embedding request. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -179,11 +190,12 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): diff --git a/docs/models/httpvalidationerror.md b/docs/errors/httpvalidationerror.md similarity index 100% rename from docs/models/httpvalidationerror.md rename to docs/errors/httpvalidationerror.md diff --git a/docs/models/agent.md b/docs/models/agent.md index 686fae75..4de5a901 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -6,14 +6,19 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentTools](../models/agenttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.AgentTool](../models/agenttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["agent"]]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `version` | *int* | :heavy_check_mark: | N/A | +| `versions` | List[*int*] | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deployment_chat` | *bool* | :heavy_check_mark: | N/A | +| `source` | *str* | :heavy_check_mark: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentaliasresponse.md b/docs/models/agentaliasresponse.md new file mode 100644 index 00000000..aa531ec5 --- /dev/null +++ b/docs/models/agentaliasresponse.md @@ -0,0 +1,11 @@ +# AgentAliasResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index 772cc80e..451f6fb8 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -3,12 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| -| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentConversationAgentVersion]](../models/agentconversationagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversationagentversion.md b/docs/models/agentconversationagentversion.md new file mode 100644 index 00000000..668a8dc0 --- /dev/null +++ b/docs/models/agentconversationagentversion.md @@ -0,0 +1,17 @@ +# AgentConversationAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/agentconversationobject.md b/docs/models/agentconversationobject.md deleted file mode 100644 index ea7cc75c..00000000 --- a/docs/models/agentconversationobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentConversationObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md deleted file mode 100644 index 34060d9a..00000000 --- a/docs/models/agentcreationrequest.md +++ /dev/null @@ -1,14 +0,0 @@ -# AgentCreationRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTools](../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequesttools.md b/docs/models/agentcreationrequesttools.md deleted file mode 100644 index c2525850..00000000 --- a/docs/models/agentcreationrequesttools.md +++ /dev/null @@ -1,41 +0,0 @@ -# AgentCreationRequestTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md index c0039f41..6bfcc3d8 100644 --- a/docs/models/agenthandoffdoneevent.md +++ 
b/docs/models/agenthandoffdoneevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `type` | [Optional[models.AgentHandoffDoneEventType]](../models/agenthandoffdoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffdoneeventtype.md b/docs/models/agenthandoffdoneeventtype.md deleted file mode 100644 index c864ce43..00000000 --- 
a/docs/models/agenthandoffdoneeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffDoneEventType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `AGENT_HANDOFF_DONE` | agent.handoff.done | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md index 8831b0eb..2b689ec7 100644 --- a/docs/models/agenthandoffentry.md +++ b/docs/models/agenthandoffentry.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `object` | [Optional[models.AgentHandoffEntryObject]](../models/agenthandoffentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.AgentHandoffEntryType]](../models/agenthandoffentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["agent.handoff"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentryobject.md b/docs/models/agenthandoffentryobject.md deleted file mode 100644 index 4bb876fb..00000000 --- a/docs/models/agenthandoffentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/agenthandoffentrytype.md b/docs/models/agenthandoffentrytype.md deleted file mode 100644 index 527ebceb..00000000 --- a/docs/models/agenthandoffentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `AGENT_HANDOFF` | agent.handoff | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md index 035cd02a..518b5a0c 100644 --- a/docs/models/agenthandoffstartedevent.md +++ b/docs/models/agenthandoffstartedevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `type` | [Optional[models.AgentHandoffStartedEventType]](../models/agenthandoffstartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedeventtype.md b/docs/models/agenthandoffstartedeventtype.md deleted file mode 100644 index 4ffaff15..00000000 --- a/docs/models/agenthandoffstartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffStartedEventType - - -## Values - -| Name | Value | -| ----------------------- | 
----------------------- | -| `AGENT_HANDOFF_STARTED` | agent.handoff.started | \ No newline at end of file diff --git a/docs/models/agentobject.md b/docs/models/agentobject.md deleted file mode 100644 index 70e143b0..00000000 --- a/docs/models/agentobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `AGENT` | agent | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md new file mode 100644 index 00000000..79406434 --- /dev/null +++ b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsCreateOrUpdateAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeletealiasrequest.md b/docs/models/agentsapiv1agentsdeletealiasrequest.md new file mode 100644 index 00000000..8e95c0c3 --- /dev/null +++ b/docs/models/agentsapiv1agentsdeletealiasrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsDeleteAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeleterequest.md b/docs/models/agentsapiv1agentsdeleterequest.md new file mode 100644 index 00000000..2799f418 --- /dev/null +++ b/docs/models/agentsapiv1agentsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | 
------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetagentversion.md b/docs/models/agentsapiv1agentsgetagentversion.md new file mode 100644 index 00000000..7fb9f2d5 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetagentversion.md @@ -0,0 +1,17 @@ +# AgentsAPIV1AgentsGetAgentVersion + + +## Supported Types + +### `int` + +```python +value: int = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index b46ac23d..ceffe009 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/agentsapiv1agentsgetversionrequest.md new file mode 100644 index 00000000..96a73589 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ -0,0 +1,9 @@ +# 
AgentsAPIV1AgentsGetVersionRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md index b5bcee62..4785a54c 100644 --- a/docs/models/agentsapiv1agentslistrequest.md +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -3,7 +3,13 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionaliasesrequest.md b/docs/models/agentsapiv1agentslistversionaliasesrequest.md new file mode 100644 index 00000000..3083bf92 --- /dev/null +++ 
b/docs/models/agentsapiv1agentslistversionaliasesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsListVersionAliasesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionsrequest.md b/docs/models/agentsapiv1agentslistversionsrequest.md new file mode 100644 index 00000000..91831700 --- /dev/null +++ b/docs/models/agentsapiv1agentslistversionsrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsListVersionsRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of versions per page | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/agentsapiv1agentsupdaterequest.md index f60f8e5b..7ef60bec 100644 --- a/docs/models/agentsapiv1agentsupdaterequest.md +++ b/docs/models/agentsapiv1agentsupdaterequest.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_update_request` | [models.AgentUpdateRequest](../models/agentupdaterequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| `update_agent_request` | [models.UpdateAgentRequest](../models/updateagentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/agentsapiv1conversationsdeleterequest.md b/docs/models/agentsapiv1conversationsdeleterequest.md new file mode 100644 index 00000000..c6eed281 --- /dev/null +++ b/docs/models/agentsapiv1conversationsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md b/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md deleted file mode 100644 index 4bc836f3..00000000 --- a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md +++ /dev/null @@ -1,19 +0,0 @@ -# AgentsAPIV1ConversationsGetResponseV1ConversationsGet - -Successful Response - - -## Supported Types - -### `models.ModelConversation` - -```python -value: models.ModelConversation = /* values here */ -``` - -### `models.AgentConversation` - -```python -value: models.AgentConversation = /* values here */ -``` - diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/agentsapiv1conversationslistrequest.md index 528a055a..62c9011f 100644 --- a/docs/models/agentsapiv1conversationslistrequest.md +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -6,4 +6,5 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | 
Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistresponse.md b/docs/models/agentsapiv1conversationslistresponse.md new file mode 100644 index 00000000..b233ee20 --- /dev/null +++ b/docs/models/agentsapiv1conversationslistresponse.md @@ -0,0 +1,17 @@ +# AgentsAPIV1ConversationsListResponse + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 73615ed9..33435732 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -3,20 +3,21 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionRequestMessage](../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionrequestmessage.md b/docs/models/agentscompletionrequestmessage.md new file mode 100644 index 00000000..957703b5 --- /dev/null +++ b/docs/models/agentscompletionrequestmessage.md @@ -0,0 +1,29 @@ +# AgentsCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/agentscompletionrequestmessages.md deleted file mode 100644 index d6a1e691..00000000 --- a/docs/models/agentscompletionrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# AgentsCompletionRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index b0aac6c1..407be8e0 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -3,20 +3,21 @@ ## Fields -| Field | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequestmessage.md b/docs/models/agentscompletionstreamrequestmessage.md new file mode 100644 index 00000000..6ccf4244 --- /dev/null +++ b/docs/models/agentscompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# AgentsCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md deleted file mode 100644 index 1bc736af..00000000 --- a/docs/models/agentscompletionstreamrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# AgentsCompletionStreamRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/agenttool.md b/docs/models/agenttool.md new file mode 100644 index 00000000..022f7e10 --- /dev/null +++ b/docs/models/agenttool.md @@ -0,0 +1,41 @@ +# AgentTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### 
`models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agenttools.md b/docs/models/agenttools.md deleted file mode 100644 index 15891f56..00000000 --- a/docs/models/agenttools.md +++ /dev/null @@ -1,41 +0,0 @@ -# AgentTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md deleted file mode 100644 index 9da03d03..00000000 --- a/docs/models/agentupdaterequest.md +++ /dev/null @@ -1,14 +0,0 @@ -# AgentUpdateRequest - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. 
| -| `tools` | List[[models.AgentUpdateRequestTools](../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequesttools.md b/docs/models/agentupdaterequesttools.md deleted file mode 100644 index 1752ee68..00000000 --- a/docs/models/agentupdaterequesttools.md +++ /dev/null @@ -1,41 +0,0 @@ -# AgentUpdateRequestTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/apiendpoint.md b/docs/models/apiendpoint.md index 700b932f..8d83a26f 100644 --- a/docs/models/apiendpoint.md +++ b/docs/models/apiendpoint.md @@ -3,10 +3,15 @@ ## Values -| Name | Value | -| -------------------------- | -------------------------- | -| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | -| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | -| `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | -| `ROOT_V1_MODERATIONS` | 
/v1/moderations | -| `ROOT_V1_CHAT_MODERATIONS` | /v1/chat/moderations | \ No newline at end of file +| Name | Value | +| ------------------------------ | ------------------------------ | +| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | +| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | +| `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | +| `ROOT_V1_MODERATIONS` | /v1/moderations | +| `ROOT_V1_CHAT_MODERATIONS` | /v1/chat/moderations | +| `ROOT_V1_OCR` | /v1/ocr | +| `ROOT_V1_CLASSIFICATIONS` | /v1/classifications | +| `ROOT_V1_CHAT_CLASSIFICATIONS` | /v1/chat/classifications | +| `ROOT_V1_CONVERSATIONS` | /v1/conversations | +| `ROOT_V1_AUDIO_TRANSCRIPTIONS` | /v1/audio/transcriptions | \ No newline at end of file diff --git a/docs/models/archiveftmodelout.md b/docs/models/archiveftmodelout.md deleted file mode 100644 index 46a9e755..00000000 --- a/docs/models/archiveftmodelout.md +++ /dev/null @@ -1,10 +0,0 @@ -# ArchiveFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ArchiveFTModelOutObject]](../models/archiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/archiveftmodeloutobject.md b/docs/models/archiveftmodeloutobject.md deleted file mode 100644 index f6f46889..00000000 --- a/docs/models/archiveftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ArchiveFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git 
a/docs/models/archivemodelresponse.md b/docs/models/archivemodelresponse.md new file mode 100644 index 00000000..276656d1 --- /dev/null +++ b/docs/models/archivemodelresponse.md @@ -0,0 +1,10 @@ +# ArchiveModelResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | 
:heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | \ No newline at end of file diff --git a/docs/models/assistantmessagerole.md b/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/attributes.md b/docs/models/attributes.md new file mode 100644 index 00000000..147708d9 --- /dev/null +++ b/docs/models/attributes.md @@ -0,0 +1,59 @@ +# Attributes + + +## Supported Types + +### `bool` + +```python +value: bool = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + +### `float` + +```python +value: float = /* values here */ +``` + +### `datetime` + +```python +value: datetime = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + +### `List[int]` + +```python +value: List[int] = /* values here */ +``` + +### `List[float]` + +```python +value: List[float] = /* values here */ +``` + +### `List[bool]` + +```python +value: List[bool] = /* values here */ +``` + diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md index c443e7ad..1ba8b0f5 100644 --- a/docs/models/audiochunk.md +++ 
b/docs/models/audiochunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `input_audio` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.AudioChunkType]](../models/audiochunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | +| `input_audio` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiochunktype.md b/docs/models/audiochunktype.md deleted file mode 100644 index 46ebf372..00000000 --- a/docs/models/audiochunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AudioChunkType - - -## Values - -| Name | Value | -| ------------- | ------------- | -| `INPUT_AUDIO` | input_audio | \ No newline at end of file diff --git a/docs/models/audioencoding.md b/docs/models/audioencoding.md new file mode 100644 index 00000000..feec8c71 --- /dev/null +++ b/docs/models/audioencoding.md @@ -0,0 +1,13 @@ +# AudioEncoding + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `PCM_S16LE` | pcm_s16le | +| `PCM_S32LE` | pcm_s32le | +| `PCM_F16LE` | pcm_f16le | +| `PCM_F32LE` | pcm_f32le | +| `PCM_MULAW` | pcm_mulaw | +| `PCM_ALAW` | pcm_alaw | \ No newline at end of file diff --git a/docs/models/audioformat.md b/docs/models/audioformat.md new file mode 100644 index 00000000..d174ab99 --- /dev/null +++ b/docs/models/audioformat.md @@ -0,0 +1,9 @@ +# AudioFormat + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | 
-------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `encoding` | [models.AudioEncoding](../models/audioencoding.md) | :heavy_check_mark: | N/A | +| `sample_rate` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index e876de18..80bd5301 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -3,13 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | -| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | -| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | -| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | -| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequeststream.md b/docs/models/audiotranscriptionrequeststream.md index 975e437a..5d64964d 100644 --- a/docs/models/audiotranscriptionrequeststream.md +++ b/docs/models/audiotranscriptionrequeststream.md @@ -12,4 +12,6 @@ | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `stream` | *Optional[Literal[True]]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 58ad5e25..0f42504f 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -17,4 +17,4 @@ | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.BaseModelCardType]](../models/basemodelcardtype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Literal["base"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/basemodelcardtype.md b/docs/models/basemodelcardtype.md deleted file mode 100644 index 4a40ce76..00000000 --- a/docs/models/basemodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# BaseModelCardType - - -## Values - -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file diff --git a/docs/models/batchjob.md b/docs/models/batchjob.md new file mode 100644 index 00000000..162e2cff --- /dev/null +++ b/docs/models/batchjob.md @@ -0,0 +1,26 @@ +# BatchJob + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["batch"]]* | :heavy_minus_sign: | N/A | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `endpoint` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | +| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `total_requests` | *int* | :heavy_check_mark: | N/A | +| `completed_requests` | *int* | :heavy_check_mark: | N/A | +| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | +| `failed_requests` | *int* | :heavy_check_mark: | N/A | +| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md deleted file mode 100644 index b5b13786..00000000 --- a/docs/models/batchjobin.md +++ /dev/null @@ -1,13 +0,0 @@ -# BatchJobIn - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | -| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md deleted file mode 100644 index b66fff08..00000000 --- a/docs/models/batchjobout.md +++ /dev/null @@ -1,25 +0,0 @@ -# BatchJobOut - - -## Fields - -| Field | Type | Required | 
Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `endpoint` | *str* | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | -| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | -| `created_at` | *int* | :heavy_check_mark: | N/A | -| `total_requests` | *int* | :heavy_check_mark: | N/A | -| `completed_requests` | *int* | :heavy_check_mark: | N/A | -| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | -| `failed_requests` | *int* | :heavy_check_mark: | N/A | -| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjoboutobject.md b/docs/models/batchjoboutobject.md deleted file mode 100644 index 64ae8965..00000000 --- a/docs/models/batchjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# BatchJobOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `BATCH` | batch | \ No newline at end of file diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md deleted file mode 100644 index 
a76cfdcc..00000000 --- a/docs/models/batchjobsout.md +++ /dev/null @@ -1,10 +0,0 @@ -# BatchJobsOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobsoutobject.md b/docs/models/batchjobsoutobject.md deleted file mode 100644 index d4bf9f65..00000000 --- a/docs/models/batchjobsoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# BatchJobsOutObject - - -## Values - -| Name | Value | -| ------ | ------ | -| `LIST` | list | \ No newline at end of file diff --git a/docs/models/batchrequest.md b/docs/models/batchrequest.md new file mode 100644 index 00000000..6ee3b394 --- /dev/null +++ b/docs/models/batchrequest.md @@ -0,0 +1,9 @@ +# BatchRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `custom_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `body` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md index 910d62ae..ba9c95ea 100644 --- a/docs/models/chatclassificationrequest.md +++ b/docs/models/chatclassificationrequest.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------ | ------------------------------------ | ------------------------------------ | 
------------------------------------ | | `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file +| `input` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md index d77d286e..deaa0ea0 100644 --- a/docs/models/chatcompletionchoice.md +++ b/docs/models/chatcompletionchoice.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | 0 | -| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | -| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A 
| stop | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md b/docs/models/chatcompletionchoicefinishreason.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md rename to docs/models/chatcompletionchoicefinishreason.md diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index a9806a4d..921161fa 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -3,23 +3,24 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequestmessage.md b/docs/models/chatcompletionrequestmessage.md new file mode 100644 index 00000000..91e9e062 --- /dev/null +++ b/docs/models/chatcompletionrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md b/docs/models/chatcompletionrequeststop.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionrequeststop.md rename to docs/models/chatcompletionrequeststop.md diff --git a/docs/models/chatcompletionrequesttoolchoice.md b/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/docs/models/chatcompletionrequesttoolchoice.md +++ b/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + ## Supported Types diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 6faeb411..8761f000 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -3,23 +3,24 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequestmessage.md b/docs/models/chatcompletionstreamrequestmessage.md new file mode 100644 index 00000000..2e4e93ac --- /dev/null +++ b/docs/models/chatcompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequestmessages.md b/docs/models/chatcompletionstreamrequestmessages.md deleted file mode 100644 index 47990611..00000000 --- a/docs/models/chatcompletionstreamrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionStreamRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- 
a/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + ## Supported Types diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md index 69b6c1dc..f252482d 100644 --- a/docs/models/chatmoderationrequest.md +++ b/docs/models/chatmoderationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline 
at end of file diff --git a/docs/models/chatmoderationrequestinputs.md b/docs/models/chatmoderationrequestinputs.md deleted file mode 100644 index cf775d60..00000000 --- a/docs/models/chatmoderationrequestinputs.md +++ /dev/null @@ -1,19 +0,0 @@ -# ChatModerationRequestInputs - -Chat to classify - - -## Supported Types - -### `List[models.One]` - -```python -value: List[models.One] = /* values here */ -``` - -### `List[List[models.Two]]` - -```python -value: List[List[models.Two]] = /* values here */ -``` - diff --git a/docs/models/chatmoderationrequestinputs1.md b/docs/models/chatmoderationrequestinputs1.md new file mode 100644 index 00000000..e15b8a84 --- /dev/null +++ b/docs/models/chatmoderationrequestinputs1.md @@ -0,0 +1,29 @@ +# ChatModerationRequestInputs1 + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequestinputs2.md b/docs/models/chatmoderationrequestinputs2.md new file mode 100644 index 00000000..f40a4ebe --- /dev/null +++ b/docs/models/chatmoderationrequestinputs2.md @@ -0,0 +1,29 @@ +# ChatModerationRequestInputs2 + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequestinputs3.md b/docs/models/chatmoderationrequestinputs3.md new file mode 100644 index 
00000000..ff1c6ea3 --- /dev/null +++ b/docs/models/chatmoderationrequestinputs3.md @@ -0,0 +1,19 @@ +# ChatModerationRequestInputs3 + +Chat to classify + + +## Supported Types + +### `List[models.ChatModerationRequestInputs1]` + +```python +value: List[models.ChatModerationRequestInputs1] = /* values here */ +``` + +### `List[List[models.ChatModerationRequestInputs2]]` + +```python +value: List[List[models.ChatModerationRequestInputs2]] = /* values here */ +``` + diff --git a/docs/models/checkpoint.md b/docs/models/checkpoint.md new file mode 100644 index 00000000..f7f35530 --- /dev/null +++ b/docs/models/checkpoint.md @@ -0,0 +1,10 @@ +# Checkpoint + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `metrics` | [models.Metric](../models/metric.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | +| `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. 
| | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. | 1716963433 | \ No newline at end of file diff --git a/docs/models/checkpointout.md b/docs/models/checkpointout.md deleted file mode 100644 index 053592d2..00000000 --- a/docs/models/checkpointout.md +++ /dev/null @@ -1,10 +0,0 @@ -# CheckpointOut - - -## Fields - -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `metrics` | [models.MetricOut](../models/metricout.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | -| `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. | | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. 
| 1716963433 | \ No newline at end of file diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md index b9befc89..99cdc4a0 100644 --- a/docs/models/classificationrequest.md +++ b/docs/models/classificationrequest.md @@ -3,7 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| | \ No newline at end of file diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierdetailedjobout.md deleted file mode 100644 index ccc88f89..00000000 --- a/docs/models/classifierdetailedjobout.md +++ /dev/null @@ -1,26 +0,0 @@ -# ClassifierDetailedJobOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| -| `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | -| `created_at` | *int* | :heavy_check_mark: | N/A | -| `modified_at` | *int* | :heavy_check_mark: | N/A | -| `training_files` | List[*str*] | :heavy_check_mark: | N/A | -| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | -| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.ClassifierDetailedJobOutIntegrations](../models/classifierdetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | -| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.ClassifierDetailedJobOutJobType]](../models/classifierdetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | -| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | -| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegrations.md b/docs/models/classifierdetailedjoboutintegrations.md deleted file mode 100644 index 5a09465e..00000000 --- a/docs/models/classifierdetailedjoboutintegrations.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierDetailedJobOutIntegrations - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/classifierdetailedjoboutjobtype.md b/docs/models/classifierdetailedjoboutjobtype.md deleted file mode 100644 index 0d1c6573..00000000 --- a/docs/models/classifierdetailedjoboutjobtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierDetailedJobOutJobType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutobject.md b/docs/models/classifierdetailedjoboutobject.md deleted file mode 100644 index 08cbcffc..00000000 --- a/docs/models/classifierdetailedjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierDetailedJobOutObject - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutstatus.md b/docs/models/classifierdetailedjoboutstatus.md deleted file mode 100644 index c3118aaf..00000000 --- a/docs/models/classifierdetailedjoboutstatus.md +++ /dev/null @@ -1,17 +0,0 @@ -# ClassifierDetailedJobOutStatus - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `QUEUED` | QUEUED | -| `STARTED` | STARTED | -| `VALIDATING` | VALIDATING | -| `VALIDATED` | VALIDATED | -| `RUNNING` | RUNNING | -| `FAILED_VALIDATION` | 
FAILED_VALIDATION | -| `FAILED` | FAILED | -| `SUCCESS` | SUCCESS | -| `CANCELLED` | CANCELLED | -| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/classifierfinetunedmodel.md b/docs/models/classifierfinetunedmodel.md new file mode 100644 index 00000000..ad05f931 --- /dev/null +++ b/docs/models/classifierfinetunedmodel.md @@ -0,0 +1,23 @@ +# ClassifierFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjob.md b/docs/models/classifierfinetuningjob.md new file 
mode 100644 index 00000000..369756ba --- /dev/null +++ b/docs/models/classifierfinetuningjob.md @@ -0,0 +1,23 @@ +# ClassifierFineTuningJob + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierFineTuningJobStatus](../models/classifierfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. 
| +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | +| `integrations` | List[[models.ClassifierFineTuningJobIntegration](../models/classifierfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjobdetails.md b/docs/models/classifierfinetuningjobdetails.md new file mode 100644 index 00000000..c5efdf1c --- /dev/null +++ b/docs/models/classifierfinetuningjobdetails.md @@ -0,0 +1,26 @@ +# ClassifierFineTuningJobDetails + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierFineTuningJobDetailsStatus](../models/classifierfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierFineTuningJobDetailsIntegration](../models/classifierfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A 
| +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjobdetailsintegration.md b/docs/models/classifierfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..438a35d9 --- /dev/null +++ b/docs/models/classifierfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/classifierfinetuningjobdetailsstatus.md b/docs/models/classifierfinetuningjobdetailsstatus.md new file mode 100644 index 00000000..058c6583 --- /dev/null +++ b/docs/models/classifierfinetuningjobdetailsstatus.md @@ -0,0 +1,17 @@ +# ClassifierFineTuningJobDetailsStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git 
a/docs/models/classifierfinetuningjobintegration.md b/docs/models/classifierfinetuningjobintegration.md new file mode 100644 index 00000000..820aee4c --- /dev/null +++ b/docs/models/classifierfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/classifierfinetuningjobstatus.md b/docs/models/classifierfinetuningjobstatus.md new file mode 100644 index 00000000..ca829885 --- /dev/null +++ b/docs/models/classifierfinetuningjobstatus.md @@ -0,0 +1,19 @@ +# ClassifierFineTuningJobStatus + +The current status of the fine-tuning job. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md deleted file mode 100644 index dd9e8bf9..00000000 --- a/docs/models/classifierftmodelout.md +++ /dev/null @@ -1,23 +0,0 @@ -# ClassifierFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | 
:heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | [Optional[models.ClassifierFTModelOutModelType]](../models/classifierftmodeloutmodeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutmodeltype.md b/docs/models/classifierftmodeloutmodeltype.md deleted file mode 100644 index e1e7e465..00000000 --- a/docs/models/classifierftmodeloutmodeltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierFTModelOutModelType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutobject.md b/docs/models/classifierftmodeloutobject.md deleted file mode 100644 index 9fe05bcf..00000000 --- a/docs/models/classifierftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md deleted file mode 100644 index aa1d3ca9..00000000 --- a/docs/models/classifierjobout.md +++ /dev/null 
@@ -1,23 +0,0 @@ -# ClassifierJobOut - - -## Fields - -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The ID of the job. | -| `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | -| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | -| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | -| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. 
| -| `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | -| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.ClassifierJobOutIntegrations](../models/classifierjoboutintegrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | -| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). 
| -| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjoboutintegrations.md b/docs/models/classifierjoboutintegrations.md deleted file mode 100644 index d938d0b9..00000000 --- a/docs/models/classifierjoboutintegrations.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierJobOutIntegrations - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/classifierjoboutjobtype.md b/docs/models/classifierjoboutjobtype.md deleted file mode 100644 index 7f5236fa..00000000 --- a/docs/models/classifierjoboutjobtype.md +++ /dev/null @@ -1,10 +0,0 @@ -# ClassifierJobOutJobType - -The type of job (`FT` for fine-tuning). - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierjoboutobject.md b/docs/models/classifierjoboutobject.md deleted file mode 100644 index 1b42d547..00000000 --- a/docs/models/classifierjoboutobject.md +++ /dev/null @@ -1,10 +0,0 @@ -# ClassifierJobOutObject - -The object type of the fine-tuning job. - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/classifierjoboutstatus.md b/docs/models/classifierjoboutstatus.md deleted file mode 100644 index 4520f164..00000000 --- a/docs/models/classifierjoboutstatus.md +++ /dev/null @@ -1,19 +0,0 @@ -# ClassifierJobOutStatus - -The current status of the fine-tuning job. 
- - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `QUEUED` | QUEUED | -| `STARTED` | STARTED | -| `VALIDATING` | VALIDATING | -| `VALIDATED` | VALIDATED | -| `RUNNING` | RUNNING | -| `FAILED_VALIDATION` | FAILED_VALIDATION | -| `FAILED` | FAILED | -| `SUCCESS` | SUCCESS | -| `CANCELLED` | CANCELLED | -| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/classifiertarget.md b/docs/models/classifiertarget.md new file mode 100644 index 00000000..f8c99e2e --- /dev/null +++ b/docs/models/classifiertarget.md @@ -0,0 +1,11 @@ +# ClassifierTarget + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `loss_function` | [OptionalNullable[models.FTClassifierLossFunction]](../models/ftclassifierlossfunction.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertargetin.md b/docs/models/classifiertargetin.md deleted file mode 100644 index 78cab67b..00000000 --- a/docs/models/classifiertargetin.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierTargetIn - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------ | -| `name` | *str* | :heavy_check_mark: | N/A | -| `labels` | List[*str*] | :heavy_check_mark: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `loss_function` | [OptionalNullable[models.FTClassifierLossFunction]](../models/ftclassifierlossfunction.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertargetout.md b/docs/models/classifiertargetout.md deleted file mode 100644 index 57535ae5..00000000 --- a/docs/models/classifiertargetout.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierTargetOut - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `name` | *str* | :heavy_check_mark: | N/A | -| `labels` | List[*str*] | :heavy_check_mark: | N/A | -| `weight` | *float* | :heavy_check_mark: | N/A | -| `loss_function` | [models.FTClassifierLossFunction](../models/ftclassifierlossfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertargetresult.md b/docs/models/classifiertargetresult.md new file mode 100644 index 00000000..ccadc623 --- /dev/null +++ b/docs/models/classifiertargetresult.md @@ -0,0 +1,11 @@ +# ClassifierTargetResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | 
:heavy_check_mark: | N/A | +| `weight` | *float* | :heavy_check_mark: | N/A | +| `loss_function` | [models.FTClassifierLossFunction](../models/ftclassifierlossfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertrainingparametersin.md b/docs/models/classifiertrainingparametersin.md deleted file mode 100644 index 1287c973..00000000 --- a/docs/models/classifiertrainingparametersin.md +++ /dev/null @@ -1,15 +0,0 @@ -# ClassifierTrainingParametersIn - -The fine-tuning hyperparameter settings used in a classifier fine-tune job. - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | -| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | -| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md index d5ad789e..6302fc62 100644 --- a/docs/models/codeinterpretertool.md +++ b/docs/models/codeinterpretertool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.CodeInterpreterToolType]](../models/codeinterpretertooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertooltype.md b/docs/models/codeinterpretertooltype.md deleted file mode 100644 index f704b65e..00000000 --- a/docs/models/codeinterpretertooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# CodeInterpreterToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `CODE_INTERPRETER` | 
code_interpreter | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 0d108225..148f7608 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -5,15 +5,15 @@ White-listed arguments from the completion API ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | -| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | -| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | -| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md deleted file mode 100644 index 84613080..00000000 --- a/docs/models/completiondetailedjobout.md +++ /dev/null @@ -1,26 +0,0 @@ -# CompletionDetailedJobOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| -| `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | -| `created_at` | *int* | :heavy_check_mark: | N/A | -| `modified_at` | *int* | :heavy_check_mark: | N/A | -| `training_files` | List[*str*] | :heavy_check_mark: | N/A | -| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | -| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.CompletionDetailedJobOutIntegrations](../models/completiondetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | -| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.CompletionDetailedJobOutJobType]](../models/completiondetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | -| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionDetailedJobOutRepositories](../models/completiondetailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | -| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutintegrations.md b/docs/models/completiondetailedjoboutintegrations.md deleted file mode 100644 index af6bbcc5..00000000 --- a/docs/models/completiondetailedjoboutintegrations.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionDetailedJobOutIntegrations - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/completiondetailedjoboutjobtype.md b/docs/models/completiondetailedjoboutjobtype.md deleted file mode 100644 index fb24db0c..00000000 --- a/docs/models/completiondetailedjoboutjobtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionDetailedJobOutJobType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutobject.md b/docs/models/completiondetailedjoboutobject.md deleted file mode 100644 index 1bec88e5..00000000 --- a/docs/models/completiondetailedjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionDetailedJobOutObject - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutrepositories.md b/docs/models/completiondetailedjoboutrepositories.md deleted file mode 100644 index 4f9727c3..00000000 --- a/docs/models/completiondetailedjoboutrepositories.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionDetailedJobOutRepositories - - -## Supported Types - -### `models.GithubRepositoryOut` - -```python -value: models.GithubRepositoryOut = /* values here */ -``` - diff --git a/docs/models/completiondetailedjoboutstatus.md b/docs/models/completiondetailedjoboutstatus.md deleted file mode 100644 index b80525ba..00000000 --- a/docs/models/completiondetailedjoboutstatus.md +++ /dev/null 
@@ -1,17 +0,0 @@ -# CompletionDetailedJobOutStatus - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `QUEUED` | QUEUED | -| `STARTED` | STARTED | -| `VALIDATING` | VALIDATING | -| `VALIDATED` | VALIDATED | -| `RUNNING` | RUNNING | -| `FAILED_VALIDATION` | FAILED_VALIDATION | -| `FAILED` | FAILED | -| `SUCCESS` | SUCCESS | -| `CANCELLED` | CANCELLED | -| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionfinetunedmodel.md b/docs/models/completionfinetunedmodel.md new file mode 100644 index 00000000..0055db02 --- /dev/null +++ b/docs/models/completionfinetunedmodel.md @@ -0,0 +1,22 @@ +# CompletionFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `model_type` | 
*Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjob.md b/docs/models/completionfinetuningjob.md new file mode 100644 index 00000000..83c0ae7e --- /dev/null +++ b/docs/models/completionfinetuningjob.md @@ -0,0 +1,24 @@ +# CompletionFineTuningJob + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.CompletionFineTuningJobStatus](../models/completionfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. 
| +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | +| `integrations` | List[[models.CompletionFineTuningJobIntegration](../models/completionfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | +| `repositories` | List[[models.CompletionFineTuningJobRepository](../models/completionfinetuningjobrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobdetails.md b/docs/models/completionfinetuningjobdetails.md new file mode 100644 index 00000000..3c54e874 --- /dev/null +++ b/docs/models/completionfinetuningjobdetails.md @@ -0,0 +1,26 @@ +# CompletionFineTuningJobDetails + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.CompletionFineTuningJobDetailsStatus](../models/completionfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | 
List[[models.CompletionFineTuningJobDetailsIntegration](../models/completionfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | +| `repositories` | List[[models.CompletionFineTuningJobDetailsRepository](../models/completionfinetuningjobdetailsrepository.md)] | :heavy_minus_sign: | N/A | +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobdetailsintegration.md b/docs/models/completionfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..38f6a349 --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobdetailsrepository.md b/docs/models/completionfinetuningjobdetailsrepository.md new file mode 100644 index 00000000..c6bd67cd --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobdetailsstatus.md 
b/docs/models/completionfinetuningjobdetailsstatus.md new file mode 100644 index 00000000..94d795a9 --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsstatus.md @@ -0,0 +1,17 @@ +# CompletionFineTuningJobDetailsStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobintegration.md b/docs/models/completionfinetuningjobintegration.md new file mode 100644 index 00000000..dbe57417 --- /dev/null +++ b/docs/models/completionfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobrepository.md b/docs/models/completionfinetuningjobrepository.md new file mode 100644 index 00000000..54225e27 --- /dev/null +++ b/docs/models/completionfinetuningjobrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobstatus.md b/docs/models/completionfinetuningjobstatus.md new file mode 100644 index 00000000..db151a1b --- /dev/null +++ b/docs/models/completionfinetuningjobstatus.md @@ -0,0 +1,19 @@ +# CompletionFineTuningJobStatus + +The current status of the fine-tuning job. 
+ + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md deleted file mode 100644 index cd085825..00000000 --- a/docs/models/completionftmodelout.md +++ /dev/null @@ -1,22 +0,0 @@ -# CompletionFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| 
`model_type` | [Optional[models.ModelType]](../models/modeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionftmodeloutobject.md b/docs/models/completionftmodeloutobject.md deleted file mode 100644 index 6f9d858c..00000000 --- a/docs/models/completionftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md deleted file mode 100644 index cb471746..00000000 --- a/docs/models/completionjobout.md +++ /dev/null @@ -1,24 +0,0 @@ -# CompletionJobOut - - -## Fields - -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The ID of the job. | -| `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| -| `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | -| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | -| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | -| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | -| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | -| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.JobType]](../models/jobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). 
| -| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionjoboutobject.md b/docs/models/completionjoboutobject.md deleted file mode 100644 index 712b107d..00000000 --- a/docs/models/completionjoboutobject.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionJobOutObject - -The object type of the fine-tuning job. - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/completiontrainingparametersin.md b/docs/models/completiontrainingparametersin.md deleted file mode 100644 index 9fcc714e..00000000 --- a/docs/models/completiontrainingparametersin.md +++ /dev/null @@ -1,16 +0,0 @@ -# CompletionTrainingParametersIn - -The fine-tuning hyperparameter settings used in a fine-tune job. - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | -| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | -| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/confirmation.md b/docs/models/confirmation.md new file mode 100644 index 00000000..fd6e6aaa --- /dev/null +++ b/docs/models/confirmation.md @@ -0,0 +1,9 @@ +# Confirmation + + +## Values + +| Name | Value | +| ------- | ------- | +| `ALLOW` | allow | +| `DENY` | deny | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/content.md deleted file mode 100644 index a833dc2c..00000000 --- a/docs/models/content.md +++ /dev/null @@ -1,17 +0,0 @@ -# Content - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[models.ContentChunk]` - -```python -value: List[models.ContentChunk] = /* values here */ -``` - diff --git a/docs/models/conversationappendrequest.md b/docs/models/conversationappendrequest.md index 1cdb584b..78a96508 100644 --- a/docs/models/conversationappendrequest.md +++ b/docs/models/conversationappendrequest.md @@ -5,8 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | 
[Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequest.md b/docs/models/conversationappendstreamrequest.md index a8516ea7..daea9c52 100644 --- a/docs/models/conversationappendstreamrequest.md +++ b/docs/models/conversationappendstreamrequest.md @@ -5,8 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | 
*Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md index ebb1d513..daefe336 100644 --- a/docs/models/conversationhistory.md +++ b/docs/models/conversationhistory.md @@ -5,8 +5,8 @@ Retrieve all entries in a conversation. 
## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `object` | *Optional[Literal["conversation.history"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistoryobject.md b/docs/models/conversationhistoryobject.md deleted file mode 100644 index a14e7f9c..00000000 --- a/docs/models/conversationhistoryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationHistoryObject - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `CONVERSATION_HISTORY` | conversation.history | \ No newline at end of file diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md index c3f00979..8fa51571 100644 --- a/docs/models/conversationmessages.md +++ b/docs/models/conversationmessages.md @@ -5,8 +5,8 @@ Similar to the conversation history but only keep the messages ## Fields -| Field | Type | Required | Description | -| 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `object` | *Optional[Literal["conversation.messages"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationmessagesobject.md b/docs/models/conversationmessagesobject.md deleted file mode 100644 index db3a441b..00000000 --- a/docs/models/conversationmessagesobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationMessagesObject - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `CONVERSATION_MESSAGES` | conversation.messages | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 141533e7..bd7823a8 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -3,16 +3,18 @@ ## Fields -| Field | Type | Required | Description | -| 
-------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | N/A | -| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| 
`handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrequestagentversion.md b/docs/models/conversationrequestagentversion.md new file mode 100644 index 00000000..9f251821 --- /dev/null +++ b/docs/models/conversationrequestagentversion.md @@ -0,0 +1,17 @@ +# ConversationRequestAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrequesthandoffexecution.md b/docs/models/conversationrequesthandoffexecution.md new file mode 100644 index 00000000..e7314f7e --- /dev/null +++ b/docs/models/conversationrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationrequesttool.md 
b/docs/models/conversationrequesttool.md new file mode 100644 index 00000000..2e4e8d01 --- /dev/null +++ b/docs/models/conversationrequesttool.md @@ -0,0 +1,41 @@ +# ConversationRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md index 38cdadd0..2732f785 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -5,9 +5,9 @@ The response after appending new entries to the conversation. 
## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `outputs` | List[[models.Outputs](../models/outputs.md)] | :heavy_check_mark: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `object` | *Optional[Literal["conversation.response"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponseobject.md b/docs/models/conversationresponseobject.md deleted file mode 100644 index bea66e52..00000000 --- a/docs/models/conversationresponseobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationResponseObject - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `CONVERSATION_RESPONSE` | conversation.response | \ No newline at end of file diff --git 
a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 61679df6..ad3ff362 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -7,9 +7,11 @@ Request to restart a new conversation from a given entry in the conversation. | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartrequestagentversion.md b/docs/models/conversationrestartrequestagentversion.md new file mode 100644 index 00000000..019ba301 --- /dev/null +++ b/docs/models/conversationrestartrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 9548b336..865a1e8f 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -7,9 +7,11 @@ Request to restart a new conversation from a given entry in the conversation. 
| Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequestagentversion.md b/docs/models/conversationrestartstreamrequestagentversion.md new file mode 100644 index 00000000..9e006300 --- /dev/null +++ b/docs/models/conversationrestartstreamrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartStreamRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index a571e2af..8b74f9e7 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -10,9 +10,11 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTool](../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequestagentversion.md b/docs/models/conversationstreamrequestagentversion.md new file mode 100644 index 00000000..52ee9672 --- /dev/null +++ b/docs/models/conversationstreamrequestagentversion.md @@ -0,0 +1,17 @@ +# ConversationStreamRequestAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationstreamrequesttool.md b/docs/models/conversationstreamrequesttool.md new file mode 100644 index 00000000..0f75f82b --- /dev/null +++ b/docs/models/conversationstreamrequesttool.md @@ -0,0 +1,41 @@ +# ConversationStreamRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* 
values here */ +``` + diff --git a/docs/models/conversationstreamrequesttools.md b/docs/models/conversationstreamrequesttools.md deleted file mode 100644 index 700c8448..00000000 --- a/docs/models/conversationstreamrequesttools.md +++ /dev/null @@ -1,41 +0,0 @@ -# ConversationStreamRequestTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/conversationthinkchunk.md b/docs/models/conversationthinkchunk.md new file mode 100644 index 00000000..1fb16bd9 --- /dev/null +++ b/docs/models/conversationthinkchunk.md @@ -0,0 +1,10 @@ +# ConversationThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `type` | *Optional[Literal["thinking"]]* | :heavy_minus_sign: | N/A | +| `thinking` | List[[models.ConversationThinkChunkThinking](../models/conversationthinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationthinkchunkthinking.md 
b/docs/models/conversationthinkchunkthinking.md new file mode 100644 index 00000000..84b80018 --- /dev/null +++ b/docs/models/conversationthinkchunkthinking.md @@ -0,0 +1,17 @@ +# ConversationThinkChunkThinking + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/createagentrequest.md b/docs/models/createagentrequest.md new file mode 100644 index 00000000..cca3a079 --- /dev/null +++ b/docs/models/createagentrequest.md @@ -0,0 +1,16 @@ +# CreateAgentRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/createagentrequesttool.md b/docs/models/createagentrequesttool.md new file mode 100644 index 00000000..c6ed3e98 --- /dev/null +++ b/docs/models/createagentrequesttool.md @@ -0,0 +1,41 @@ +# CreateAgentRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/createbatchjobrequest.md b/docs/models/createbatchjobrequest.md new file mode 100644 index 00000000..d094e2d5 --- /dev/null +++ b/docs/models/createbatchjobrequest.md @@ -0,0 +1,14 @@ +# CreateBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `requests` | List[[models.BatchRequest](../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | +| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | \ No newline at end of file diff --git a/docs/models/createfileresponse.md b/docs/models/createfileresponse.md new file mode 100644 index 00000000..8152922b --- /dev/null +++ b/docs/models/createfileresponse.md @@ -0,0 +1,18 @@ +# CreateFileResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/createfinetuningjobrequest.md b/docs/models/createfinetuningjobrequest.md new file mode 100644 index 00000000..a93e323d --- /dev/null +++ b/docs/models/createfinetuningjobrequest.md @@ -0,0 +1,18 @@ +# CreateFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/createfinetuningjobrequestintegration.md b/docs/models/createfinetuningjobrequestintegration.md new file mode 100644 index 00000000..0054a4a6 --- /dev/null +++ b/docs/models/createfinetuningjobrequestintegration.md @@ -0,0 +1,11 @@ +# CreateFineTuningJobRequestIntegration + + +## Supported Types + +### `models.WandbIntegration` + +```python +value: models.WandbIntegration = /* values here */ +``` + diff --git a/docs/models/createfinetuningjobrequestrepository.md b/docs/models/createfinetuningjobrequestrepository.md new file mode 100644 index 00000000..32be1b6d --- /dev/null +++ b/docs/models/createfinetuningjobrequestrepository.md @@ -0,0 +1,11 @@ +# CreateFineTuningJobRequestRepository + + +## Supported Types + +### `models.GithubRepositoryIn` + +```python +value: models.GithubRepositoryIn = /* values here */ +``` + diff --git a/docs/models/createlibraryrequest.md b/docs/models/createlibraryrequest.md new file mode 100644 index 00000000..71562806 --- /dev/null +++ b/docs/models/createlibraryrequest.md @@ -0,0 +1,10 @@ +# CreateLibraryRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`chunk_size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/data.md b/docs/models/data.md deleted file mode 100644 index 95dc8d28..00000000 --- a/docs/models/data.md +++ /dev/null @@ -1,17 +0,0 @@ -# Data - - -## Supported Types - -### `models.BaseModelCard` - -```python -value: models.BaseModelCard = /* values here */ -``` - -### `models.FTModelCard` - -```python -value: models.FTModelCard = /* values here */ -``` - diff --git a/docs/models/deletefileout.md b/docs/models/deletefileout.md deleted file mode 100644 index 4709cc49..00000000 --- a/docs/models/deletefileout.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeleteFileOut - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The ID of the deleted file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type that was deleted | file | -| `deleted` | *bool* | :heavy_check_mark: | The deletion status. | false | \ No newline at end of file diff --git a/docs/models/deletefileresponse.md b/docs/models/deletefileresponse.md new file mode 100644 index 00000000..188e2504 --- /dev/null +++ b/docs/models/deletefileresponse.md @@ -0,0 +1,10 @@ +# DeleteFileResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted file. 
| 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type that was deleted | file | +| `deleted` | *bool* | :heavy_check_mark: | The deletion status. | false | \ No newline at end of file diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index 61deabbf..e0ee575f 100644 --- a/docs/models/deltamessage.md +++ b/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/deltamessagecontent.md b/docs/models/deltamessagecontent.md new file mode 100644 index 00000000..8142772d --- /dev/null +++ b/docs/models/deltamessagecontent.md @@ -0,0 +1,17 @@ +# DeltaMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` 
+ +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/document.md b/docs/models/document.md index 509d43b7..284babb9 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -1,25 +1,26 @@ # Document -Document to run OCR on - - -## Supported Types - -### `models.FileChunk` - -```python -value: models.FileChunk = /* values here */ -``` - -### `models.DocumentURLChunk` - -```python -value: models.DocumentURLChunk = /* values here */ -``` - -### `models.ImageURLChunk` - -```python -value: models.ImageURLChunk = /* values here */ -``` +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `size` | *Nullable[int]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | +| `tokens_processing_main_content` | 
*OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | +| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md index 82315f32..95c3fa52 100644 --- a/docs/models/documentlibrarytool.md +++ b/docs/models/documentlibrarytool.md @@ -3,7 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign: | N/A | -| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. 
| \ No newline at end of file diff --git a/docs/models/documentlibrarytooltype.md b/docs/models/documentlibrarytooltype.md deleted file mode 100644 index ebd420f6..00000000 --- a/docs/models/documentlibrarytooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentLibraryToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git a/docs/models/documentout.md b/docs/models/documentout.md deleted file mode 100644 index b9e7b212..00000000 --- a/docs/models/documentout.md +++ /dev/null @@ -1,24 +0,0 @@ -# DocumentOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `hash` | *str* | :heavy_check_mark: | N/A | -| `mime_type` | *str* | :heavy_check_mark: | N/A | -| `extension` | *str* | :heavy_check_mark: | N/A | -| `size` | *int* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `processing_status` | *str* | :heavy_check_mark: | N/A | -| `uploaded_by_id` | *str* | :heavy_check_mark: | N/A | -| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | -| `tokens_processing_main_content` | 
*OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentunion.md b/docs/models/documentunion.md new file mode 100644 index 00000000..e573bd46 --- /dev/null +++ b/docs/models/documentunion.md @@ -0,0 +1,25 @@ +# DocumentUnion + +Document to run OCR on + + +## Supported Types + +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/docs/models/documentupdatein.md b/docs/models/documentupdatein.md deleted file mode 100644 index 215ae95f..00000000 --- a/docs/models/documentupdatein.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentUpdateIn - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentupload.md b/docs/models/documentupload.md new file mode 100644 index 00000000..4e58a475 --- /dev/null +++ b/docs/models/documentupload.md @@ -0,0 +1,8 @@ +# DocumentUpload + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 6c9a5b4d..9dbfbe50 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | -| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md deleted file mode 100644 index 32e1fa9e..00000000 --- a/docs/models/documenturlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentURLChunkType - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 3a778a6f..7269c055 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,9 +3,11 @@ ## Fields -| Field | Type | Required | Description | Example | -| 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md index a3f82c1c..527a089b 100644 --- a/docs/models/embeddingrequestinputs.md +++ b/docs/models/embeddingrequestinputs.md @@ -1,6 +1,6 @@ # EmbeddingRequestInputs -Text to embed. +The text content to be embedded, can be a string or an array of strings for fast processing in bulk. ## Supported Types diff --git a/docs/models/encodingformat.md b/docs/models/encodingformat.md new file mode 100644 index 00000000..7d5941cf --- /dev/null +++ b/docs/models/encodingformat.md @@ -0,0 +1,9 @@ +# EncodingFormat + + +## Values + +| Name | Value | +| -------- | -------- | +| `FLOAT` | float | +| `BASE64` | base64 | \ No newline at end of file diff --git a/docs/models/entries.md b/docs/models/entries.md deleted file mode 100644 index 8e5a20d0..00000000 --- a/docs/models/entries.md +++ /dev/null @@ -1,41 +0,0 @@ -# Entries - - -## Supported Types - -### `models.MessageInputEntry` - -```python -value: models.MessageInputEntry = /* values here */ -``` - -### `models.MessageOutputEntry` - -```python -value: models.MessageOutputEntry = /* values here */ -``` - -### `models.FunctionResultEntry` - -```python -value: models.FunctionResultEntry = /* values here */ -``` - -### `models.FunctionCallEntry` - -```python -value: models.FunctionCallEntry = /* values here */ -``` - -### `models.ToolExecutionEntry` - -```python -value: models.ToolExecutionEntry = /* values here */ -``` - -### `models.AgentHandoffEntry` - -```python -value: 
models.AgentHandoffEntry = /* values here */ -``` - diff --git a/docs/models/entry.md b/docs/models/entry.md new file mode 100644 index 00000000..d934b677 --- /dev/null +++ b/docs/models/entry.md @@ -0,0 +1,41 @@ +# Entry + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/event.md b/docs/models/event.md new file mode 100644 index 00000000..3eebffca --- /dev/null +++ b/docs/models/event.md @@ -0,0 +1,10 @@ +# Event + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | The name of the event. | +| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. 
| \ No newline at end of file diff --git a/docs/models/eventout.md b/docs/models/eventout.md deleted file mode 100644 index d9202353..00000000 --- a/docs/models/eventout.md +++ /dev/null @@ -1,10 +0,0 @@ -# EventOut - - -## Fields - -| Field | Type | Required | Description | -| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | -| `name` | *str* | :heavy_check_mark: | The name of the event. | -| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | \ No newline at end of file diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md index b28ab3fe..57d11722 100644 --- a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -7,7 +7,9 @@ | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at 
end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md deleted file mode 100644 index a5dd1174..00000000 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ /dev/null @@ -1,9 +0,0 @@ -# FilesAPIRoutesUploadFileMultiPartBodyParams - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | -| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/filesignedurl.md b/docs/models/filesignedurl.md deleted file mode 100644 index 52ce3f4f..00000000 --- a/docs/models/filesignedurl.md +++ /dev/null @@ -1,8 +0,0 @@ -# FileSignedURL - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `url` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index 7b785cf0..fde0b625 100644 --- a/docs/models/fimcompletionrequest.md +++ b/docs/models/fimcompletionrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index d49a6301..ba62d854 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/finetunedmodelcapabilities.md b/docs/models/finetunedmodelcapabilities.md new file mode 100644 index 00000000..d3203a2a --- /dev/null +++ b/docs/models/finetunedmodelcapabilities.md @@ -0,0 +1,12 @@ +# FineTunedModelCapabilities + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/finishreason.md b/docs/models/finishreason.md deleted file mode 100644 index 2af53f6e..00000000 --- a/docs/models/finishreason.md +++ /dev/null @@ -1,12 +0,0 @@ -# FinishReason - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `STOP` | stop | -| `LENGTH` | length | -| `MODEL_LENGTH` | model_length | -| `ERROR` | error | -| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/format_.md b/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/ftmodelcapabilitiesout.md deleted file mode 100644 index 19690476..00000000 --- a/docs/models/ftmodelcapabilitiesout.md +++ 
/dev/null @@ -1,12 +0,0 @@ -# FTModelCapabilitiesOut - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index 35032775..409f0526 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,7 +19,7 @@ Extra fields for fine-tuned models. | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["fine-tuned"]* | :heavy_check_mark: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md index fd3aa5c5..2843db9d 100644 --- a/docs/models/functioncallentry.md +++ 
b/docs/models/functioncallentry.md @@ -3,13 +3,16 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `object` | [Optional[models.FunctionCallEntryObject]](../models/functioncallentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FunctionCallEntryType]](../models/functioncallentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["function.call"]]* | :heavy_minus_sign: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | +| `confirmation_status` | [OptionalNullable[models.FunctionCallEntryConfirmationStatus]](../models/functioncallentryconfirmationstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentryconfirmationstatus.md b/docs/models/functioncallentryconfirmationstatus.md new file mode 100644 index 00000000..8948beb6 --- /dev/null +++ b/docs/models/functioncallentryconfirmationstatus.md @@ -0,0 +1,10 @@ +# FunctionCallEntryConfirmationStatus + + +## Values + +| Name | Value | +| --------- | --------- | +| `PENDING` | pending | +| `ALLOWED` | allowed | +| `DENIED` | denied | \ No newline at end of file diff --git a/docs/models/functioncallentryobject.md b/docs/models/functioncallentryobject.md deleted file mode 100644 index 3cf2e427..00000000 --- a/docs/models/functioncallentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functioncallentrytype.md b/docs/models/functioncallentrytype.md deleted file mode 100644 index 7ea34c52..00000000 --- a/docs/models/functioncallentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEntryType - - -## Values - -| Name | Value | -| 
--------------- | --------------- | -| `FUNCTION_CALL` | function.call | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md index c25679a5..0e3a36d6 100644 --- a/docs/models/functioncallevent.md +++ b/docs/models/functioncallevent.md @@ -3,12 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | [Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| 
`output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `confirmation_status` | [OptionalNullable[models.FunctionCallEventConfirmationStatus]](../models/functioncalleventconfirmationstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventconfirmationstatus.md b/docs/models/functioncalleventconfirmationstatus.md new file mode 100644 index 00000000..4a3c8774 --- /dev/null +++ b/docs/models/functioncalleventconfirmationstatus.md @@ -0,0 +1,10 @@ +# FunctionCallEventConfirmationStatus + + +## Values + +| Name | Value | +| --------- | --------- | +| `PENDING` | pending | +| `ALLOWED` | allowed | +| `DENIED` | denied | \ No newline at end of file diff --git a/docs/models/functioncalleventtype.md b/docs/models/functioncalleventtype.md deleted file mode 100644 index 8cf3f038..00000000 --- a/docs/models/functioncalleventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEventType - - -## Values - -| Name | Value | -| --------------------- | --------------------- | -| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md index 6df54d3d..6a77abfd 100644 --- a/docs/models/functionresultentry.md +++ b/docs/models/functionresultentry.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------ | -| `object` | [Optional[models.FunctionResultEntryObject]](../models/functionresultentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FunctionResultEntryType]](../models/functionresultentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `result` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["function.result"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `result` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functionresultentryobject.md b/docs/models/functionresultentryobject.md deleted file mode 100644 index fe52e0a5..00000000 --- 
a/docs/models/functionresultentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionResultEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functionresultentrytype.md b/docs/models/functionresultentrytype.md deleted file mode 100644 index 35c94d8e..00000000 --- a/docs/models/functionresultentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionResultEntryType - - -## Values - -| Name | Value | -| ----------------- | ----------------- | -| `FUNCTION_RESULT` | function.result | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md index 8c424593..0226b704 100644 --- a/docs/models/functiontool.md +++ b/docs/models/functiontool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | -| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `type` | *Literal["function"]* | :heavy_check_mark: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functiontooltype.md b/docs/models/functiontooltype.md deleted file mode 100644 index 9c095625..00000000 --- a/docs/models/functiontooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionToolType - - -## Values - -| Name | Value | -| 
---------- | ---------- | -| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/getfileresponse.md b/docs/models/getfileresponse.md new file mode 100644 index 00000000..0edd13e0 --- /dev/null +++ b/docs/models/getfileresponse.md @@ -0,0 +1,19 @@ +# GetFileResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/getsignedurlresponse.md b/docs/models/getsignedurlresponse.md new file mode 100644 index 00000000..bde69323 --- /dev/null +++ b/docs/models/getsignedurlresponse.md @@ -0,0 +1,8 @@ +# GetSignedURLResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `url` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepository.md b/docs/models/githubrepository.md new file mode 100644 index 00000000..827b6f34 --- /dev/null +++ b/docs/models/githubrepository.md @@ -0,0 +1,13 @@ +# GithubRepository + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md index 1584152b..241cf584 100644 --- a/docs/models/githubrepositoryin.md +++ b/docs/models/githubrepositoryin.md @@ -3,11 +3,11 @@ ## 
Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.GithubRepositoryInType]](../models/githubrepositoryintype.md) | :heavy_minus_sign: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryintype.md b/docs/models/githubrepositoryintype.md deleted file mode 100644 index 63da967c..00000000 --- a/docs/models/githubrepositoryintype.md +++ /dev/null @@ -1,8 +0,0 @@ -# GithubRepositoryInType - - -## Values - -| Name | Value | -| -------- | -------- | -| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md deleted file mode 100644 index 03f0b266..00000000 --- a/docs/models/githubrepositoryout.md +++ /dev/null @@ -1,13 +0,0 @@ -# GithubRepositoryOut - - -## Fields - -| Field | Type | Required | Description | -| 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.GithubRepositoryOutType]](../models/githubrepositoryouttype.md) | :heavy_minus_sign: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryouttype.md b/docs/models/githubrepositoryouttype.md deleted file mode 100644 index 46c3eefd..00000000 --- a/docs/models/githubrepositoryouttype.md +++ /dev/null @@ -1,8 +0,0 @@ -# GithubRepositoryOutType - - -## Values - -| Name | Value | -| -------- | -------- | -| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/handoffexecution.md b/docs/models/handoffexecution.md deleted file mode 100644 index 61e7dade..00000000 --- a/docs/models/handoffexecution.md +++ /dev/null @@ -1,9 +0,0 @@ -# HandoffExecution - - -## Values - -| Name | Value | -| -------- | -------- | -| `CLIENT` | client | -| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/hyperparameters.md b/docs/models/hyperparameters.md index 46a6dd6b..b6c00c36 100644 --- a/docs/models/hyperparameters.md +++ b/docs/models/hyperparameters.md @@ -3,15 +3,15 @@ ## Supported Types -### `models.CompletionTrainingParametersIn` +### `models.CompletionTrainingParameters` ```python -value: models.CompletionTrainingParametersIn = /* values here */ +value: models.CompletionTrainingParameters = /* values here */ ``` -### `models.ClassifierTrainingParametersIn` +### 
`models.ClassifierTrainingParameters` ```python -value: models.ClassifierTrainingParametersIn = /* values here */ +value: models.ClassifierTrainingParameters = /* values here */ ``` diff --git a/docs/models/imagedetail.md b/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md index b8fc9cf4..b476b6f2 100644 --- a/docs/models/imagegenerationtool.md +++ b/docs/models/imagegenerationtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.ImageGenerationToolType]](../models/imagegenerationtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imagegenerationtooltype.md b/docs/models/imagegenerationtooltype.md deleted file mode 100644 index 
29681b58..00000000 --- a/docs/models/imagegenerationtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageGenerationToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `IMAGE_GENERATION` | image_generation | \ No newline at end of file diff --git a/docs/models/imageurl.md b/docs/models/imageurl.md index 7c2bcbc3..6358e0ac 100644 --- a/docs/models/imageurl.md +++ b/docs/models/imageurl.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md index f1b926ef..db0c53d2 100644 --- a/docs/models/imageurlchunk.md +++ b/docs/models/imageurlchunk.md @@ -5,7 +5,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No 
newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunkimageurl.md b/docs/models/imageurlchunkimageurl.md deleted file mode 100644 index 76738908..00000000 --- a/docs/models/imageurlchunkimageurl.md +++ /dev/null @@ -1,17 +0,0 @@ -# ImageURLChunkImageURL - - -## Supported Types - -### `models.ImageURL` - -```python -value: models.ImageURL = /* values here */ -``` - -### `str` - -```python -value: str = /* values here */ -``` - diff --git a/docs/models/imageurlchunktype.md b/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/docs/models/imageurlunion.md b/docs/models/imageurlunion.md new file mode 100644 index 00000000..db97130f --- /dev/null +++ b/docs/models/imageurlunion.md @@ -0,0 +1,17 @@ +# ImageURLUnion + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/inputs.md b/docs/models/inputs.md index 0f62a7ce..d5771207 100644 --- a/docs/models/inputs.md +++ b/docs/models/inputs.md @@ -5,10 +5,10 @@ Chat to classify ## Supported Types -### `models.InstructRequestInputs` +### `models.InstructRequest` ```python -value: models.InstructRequestInputs = /* values here */ +value: models.InstructRequest = /* values here 
*/ ``` ### `List[models.InstructRequest]` diff --git a/docs/models/instructrequest.md b/docs/models/instructrequest.md index 9500cb58..5f0cdfff 100644 --- a/docs/models/instructrequest.md +++ b/docs/models/instructrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `messages` | List[[models.InstructRequestMessages](../models/instructrequestmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestMessage](../models/instructrequestmessage.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md deleted file mode 100644 index 4caa028f..00000000 --- a/docs/models/instructrequestinputs.md +++ /dev/null @@ -1,8 +0,0 @@ -# InstructRequestInputs - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `messages` | 
List[[models.InstructRequestInputsMessages](../models/instructrequestinputsmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputsmessages.md b/docs/models/instructrequestinputsmessages.md deleted file mode 100644 index 237e131f..00000000 --- a/docs/models/instructrequestinputsmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# InstructRequestInputsMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/instructrequestmessage.md b/docs/models/instructrequestmessage.md new file mode 100644 index 00000000..57ed27ab --- /dev/null +++ b/docs/models/instructrequestmessage.md @@ -0,0 +1,29 @@ +# InstructRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/instructrequestmessages.md b/docs/models/instructrequestmessages.md deleted file mode 100644 index 9c866a7d..00000000 --- a/docs/models/instructrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# InstructRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: 
models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/integrations.md b/docs/models/integrations.md deleted file mode 100644 index 35214d63..00000000 --- a/docs/models/integrations.md +++ /dev/null @@ -1,11 +0,0 @@ -# Integrations - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/jobin.md b/docs/models/jobin.md deleted file mode 100644 index b9651770..00000000 --- a/docs/models/jobin.md +++ /dev/null @@ -1,18 +0,0 @@ -# JobIn - - -## Fields - -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | -| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegrations](../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | -| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| -| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | -| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobinintegrations.md b/docs/models/jobinintegrations.md deleted file mode 100644 index 91c10242..00000000 --- a/docs/models/jobinintegrations.md +++ /dev/null @@ -1,11 +0,0 @@ -# JobInIntegrations - - -## Supported Types - -### `models.WandbIntegration` - -```python -value: models.WandbIntegration = /* values here */ -``` - diff --git a/docs/models/jobinrepositories.md b/docs/models/jobinrepositories.md deleted file mode 100644 index b94477af..00000000 --- a/docs/models/jobinrepositories.md +++ /dev/null @@ -1,11 +0,0 @@ -# JobInRepositories - - -## Supported Types - -### `models.GithubRepositoryIn` - -```python -value: models.GithubRepositoryIn = /* values here */ -``` - diff --git a/docs/models/jobmetadata.md b/docs/models/jobmetadata.md new file mode 100644 index 00000000..5d8a89dd --- /dev/null +++ b/docs/models/jobmetadata.md @@ -0,0 +1,14 @@ +# JobMetadata + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `train_tokens` | 
*OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobmetadataout.md b/docs/models/jobmetadataout.md deleted file mode 100644 index 6218a161..00000000 --- a/docs/models/jobmetadataout.md +++ /dev/null @@ -1,14 +0,0 @@ -# JobMetadataOut - - -## Fields - -| Field | Type | Required | Description | -| --------------------------- | --------------------------- | --------------------------- | --------------------------- | -| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md index 3930aacd..8c259bea 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `inline` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md index b062b873..5ceb0b2c 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -12,4 +12,5 @@ | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../models/orderby.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md index 1b331662..fb62eb62 100644 --- a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md @@ -5,15 +5,15 @@ OK ## Supported Types -### `models.ClassifierDetailedJobOut` +### `models.ClassifierFineTuningJobDetails` ```python -value: models.ClassifierDetailedJobOut = /* values here */ +value: models.ClassifierFineTuningJobDetails = /* values here */ ``` -### `models.CompletionDetailedJobOut` +### `models.CompletionFineTuningJobDetails` ```python -value: models.CompletionDetailedJobOut = /* values here */ +value: models.CompletionFineTuningJobDetails = /* values here */ ``` diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md index eeddc3cd..7b52e2ca 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ 
b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -5,15 +5,15 @@ OK ## Supported Types -### `models.Response1` +### `models.Response` ```python -value: models.Response1 = /* values here */ +value: models.Response = /* values here */ ``` -### `models.LegacyJobMetadataOut` +### `models.LegacyJobMetadata` ```python -value: models.LegacyJobMetadataOut = /* values here */ +value: models.LegacyJobMetadata = /* values here */ ``` diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md index e0d2e361..f7705327 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md @@ -5,15 +5,15 @@ OK ## Supported Types -### `models.ClassifierDetailedJobOut` +### `models.ClassifierFineTuningJobDetails` ```python -value: models.ClassifierDetailedJobOut = /* values here */ +value: models.ClassifierFineTuningJobDetails = /* values here */ ``` -### `models.CompletionDetailedJobOut` +### `models.CompletionFineTuningJobDetails` ```python -value: models.CompletionDetailedJobOut = /* values here */ +value: models.CompletionFineTuningJobDetails = /* values here */ ``` diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md index 3dca3cd8..23c52c34 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. 
| \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. 
| +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md new file mode 100644 index 00000000..40d57686 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobsStatus + +The current job state to filter on. When set, the other results are not displayed. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md index 64f4cca6..1a7e71d4 100644 --- a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md @@ -5,15 +5,15 @@ OK ## Supported Types -### `models.ClassifierDetailedJobOut` +### `models.ClassifierFineTuningJobDetails` ```python -value: models.ClassifierDetailedJobOut = /* values here */ +value: models.ClassifierFineTuningJobDetails = /* values here */ ``` -### `models.CompletionDetailedJobOut` +### `models.CompletionFineTuningJobDetails` ```python -value: models.CompletionDetailedJobOut = /* values here */ +value: models.CompletionFineTuningJobDetails = /* values here */ ``` diff --git 
a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md index 6d93832e..dbe49a86 100644 --- a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | -| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | -| `update_ft_model_in` | [models.UpdateFTModelIn](../models/updateftmodelin.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `update_model_request` | [models.UpdateModelRequest](../models/updatemodelrequest.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md index 54f4c398..f40350bf 100644 --- a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md @@ -5,15 +5,15 @@ OK ## Supported Types -### `models.ClassifierFTModelOut` +### `models.ClassifierFineTunedModel` ```python -value: models.ClassifierFTModelOut = /* values here */ +value: models.ClassifierFineTunedModel = /* values here */ ``` -### `models.CompletionFTModelOut` +### `models.CompletionFineTunedModel` ```python -value: models.CompletionFTModelOut = /* values here */ +value: models.CompletionFineTunedModel = /* values here */ ``` diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md deleted file mode 100644 index 977013f7..00000000 --- a/docs/models/jobsout.md +++ /dev/null @@ -1,10 +0,0 @@ -# JobsOut - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutdata.md b/docs/models/jobsoutdata.md deleted file mode 100644 index 28cec311..00000000 --- a/docs/models/jobsoutdata.md +++ /dev/null @@ -1,17 +0,0 @@ -# JobsOutData - - -## Supported Types - -### `models.ClassifierJobOut` 
- -```python -value: models.ClassifierJobOut = /* values here */ -``` - -### `models.CompletionJobOut` - -```python -value: models.CompletionJobOut = /* values here */ -``` - diff --git a/docs/models/jobsoutobject.md b/docs/models/jobsoutobject.md deleted file mode 100644 index f6c8a2c3..00000000 --- a/docs/models/jobsoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# JobsOutObject - - -## Values - -| Name | Value | -| ------ | ------ | -| `LIST` | list | \ No newline at end of file diff --git a/docs/models/jobtype.md b/docs/models/jobtype.md deleted file mode 100644 index 847c6622..00000000 --- a/docs/models/jobtype.md +++ /dev/null @@ -1,10 +0,0 @@ -# JobType - -The type of job (`FT` for fine-tuning). - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/legacyjobmetadata.md b/docs/models/legacyjobmetadata.md new file mode 100644 index 00000000..4705ab4f --- /dev/null +++ b/docs/models/legacyjobmetadata.md @@ -0,0 +1,19 @@ +# LegacyJobMetadata + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | The approximated time (in seconds) for the fine-tuning process to complete. | 220 | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | The cost of the fine-tuning job. | 10 | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | The currency used for the fine-tuning job cost. | EUR | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of tokens consumed by one training step. | 131072 | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens used during the fine-tuning process. | 1310720 | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens in the training dataset. | 305375 | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `details` | *str* | :heavy_check_mark: | N/A | | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| 10 | +| `object` | *Optional[Literal["job.metadata"]]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md deleted file mode 100644 index 53a45485..00000000 --- a/docs/models/legacyjobmetadataout.md +++ /dev/null @@ -1,19 +0,0 @@ -# LegacyJobMetadataOut - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | The approximated time (in seconds) for the fine-tuning process to complete. | 220 | -| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | The cost of the fine-tuning job. | 10 | -| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | The currency used for the fine-tuning job cost. 
| EUR | -| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of tokens consumed by one training step. | 131072 | -| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens used during the fine-tuning process. | 1310720 | -| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens in the training dataset. | 305375 | -| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `details` | *str* | :heavy_check_mark: | N/A | | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| 10 | -| `object` | [Optional[models.LegacyJobMetadataOutObject]](../models/legacyjobmetadataoutobject.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataoutobject.md b/docs/models/legacyjobmetadataoutobject.md deleted file mode 100644 index 9873ada8..00000000 --- a/docs/models/legacyjobmetadataoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# LegacyJobMetadataOutObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `JOB_METADATA` | job.metadata | \ No newline at end of file diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/librariesdocumentslistv1request.md index 1b4eb24d..44f63001 100644 --- a/docs/models/librariesdocumentslistv1request.md +++ b/docs/models/librariesdocumentslistv1request.md @@ -9,5 +9,6 @@ | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | | `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/librariesdocumentsupdatev1request.md index 2f18b014..d4630850 100644 --- a/docs/models/librariesdocumentsupdatev1request.md +++ b/docs/models/librariesdocumentsupdatev1request.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | -| `document_update_in` | [models.DocumentUpdateIn](../models/documentupdatein.md) | 
:heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `update_document_request` | [models.UpdateDocumentRequest](../models/updatedocumentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1documentupload.md b/docs/models/librariesdocumentsuploadv1documentupload.md deleted file mode 100644 index a0ba95da..00000000 --- a/docs/models/librariesdocumentsuploadv1documentupload.md +++ /dev/null @@ -1,8 +0,0 @@ -# LibrariesDocumentsUploadV1DocumentUpload - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/librariesdocumentsuploadv1request.md index 7c91ca9b..172a6183 100644 --- a/docs/models/librariesdocumentsuploadv1request.md +++ b/docs/models/librariesdocumentsuploadv1request.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `request_body` | [models.LibrariesDocumentsUploadV1DocumentUpload](../models/librariesdocumentsuploadv1documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `request_body` | [models.DocumentUpload](../models/documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesupdatev1request.md b/docs/models/librariesupdatev1request.md index a68ef7a8..c5c142db 100644 --- a/docs/models/librariesupdatev1request.md +++ b/docs/models/librariesupdatev1request.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | -| 
`library_id` | *str* | :heavy_check_mark: | N/A | -| `library_in_update` | [models.LibraryInUpdate](../models/libraryinupdate.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `update_library_request` | [models.UpdateLibraryRequest](../models/updatelibraryrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/library.md b/docs/models/library.md new file mode 100644 index 00000000..4319f43d --- /dev/null +++ b/docs/models/library.md @@ -0,0 +1,23 @@ +# Library + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `owner_id` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `owner_type` | *str* | :heavy_check_mark: | N/A | +| `total_size` | *int* | :heavy_check_mark: | N/A | +| `nb_documents` | *int* | :heavy_check_mark: | N/A | +| `chunk_size` | *Nullable[int]* | :heavy_check_mark: | N/A | +| `emoji` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `explicit_user_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `explicit_workspace_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Generated Name | \ No newline at end of file diff --git a/docs/models/libraryin.md b/docs/models/libraryin.md deleted file mode 100644 index d6b11914..00000000 --- a/docs/models/libraryin.md +++ /dev/null @@ -1,10 +0,0 @@ -# LibraryIn - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `chunk_size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/libraryinupdate.md b/docs/models/libraryinupdate.md deleted file mode 100644 index 4aa169c7..00000000 --- a/docs/models/libraryinupdate.md +++ /dev/null @@ -1,9 +0,0 @@ -# LibraryInUpdate - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/libraryout.md b/docs/models/libraryout.md deleted file mode 100644 index cf4de41b..00000000 --- a/docs/models/libraryout.md +++ /dev/null @@ -1,23 +0,0 @@ -# LibraryOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `owner_id` | *str* | :heavy_check_mark: | N/A | -| `owner_type` | *str* | :heavy_check_mark: | N/A | -| `total_size` | *int* | :heavy_check_mark: | N/A | -| `nb_documents` | *int* | :heavy_check_mark: | N/A | -| `chunk_size` | *Nullable[int]* | :heavy_check_mark: | N/A | -| `emoji` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `generated_description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `explicit_user_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `explicit_workspace_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listbatchjobsresponse.md b/docs/models/listbatchjobsresponse.md new file mode 100644 index 00000000..c23e3220 --- /dev/null +++ b/docs/models/listbatchjobsresponse.md @@ -0,0 +1,10 @@ +# ListBatchJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `data` | 
List[[models.BatchJob](../models/batchjob.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdocumentout.md b/docs/models/listdocumentout.md deleted file mode 100644 index f14157b8..00000000 --- a/docs/models/listdocumentout.md +++ /dev/null @@ -1,9 +0,0 @@ -# ListDocumentOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A | -| `data` | List[[models.DocumentOut](../models/documentout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdocumentsresponse.md b/docs/models/listdocumentsresponse.md new file mode 100644 index 00000000..47b9d3b7 --- /dev/null +++ b/docs/models/listdocumentsresponse.md @@ -0,0 +1,9 @@ +# ListDocumentsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A | +| `data` | List[[models.Document](../models/document.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listfilesout.md b/docs/models/listfilesout.md deleted file mode 100644 index ee544c1b..00000000 --- a/docs/models/listfilesout.md +++ /dev/null @@ -1,10 +0,0 @@ -# ListFilesOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------- | 
-------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | -| `object` | *str* | :heavy_check_mark: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listfilesresponse.md b/docs/models/listfilesresponse.md new file mode 100644 index 00000000..802f685f --- /dev/null +++ b/docs/models/listfilesresponse.md @@ -0,0 +1,10 @@ +# ListFilesResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | +| `object` | *str* | :heavy_check_mark: | N/A | +| `total` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listfinetuningjobsresponse.md b/docs/models/listfinetuningjobsresponse.md new file mode 100644 index 00000000..00251242 --- /dev/null +++ b/docs/models/listfinetuningjobsresponse.md @@ -0,0 +1,10 @@ +# ListFineTuningJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `data` | List[[models.ListFineTuningJobsResponseData](../models/listfinetuningjobsresponsedata.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| 
`total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listfinetuningjobsresponsedata.md b/docs/models/listfinetuningjobsresponsedata.md new file mode 100644 index 00000000..adb06444 --- /dev/null +++ b/docs/models/listfinetuningjobsresponsedata.md @@ -0,0 +1,17 @@ +# ListFineTuningJobsResponseData + + +## Supported Types + +### `models.ClassifierFineTuningJob` + +```python +value: models.ClassifierFineTuningJob = /* values here */ +``` + +### `models.CompletionFineTuningJob` + +```python +value: models.CompletionFineTuningJob = /* values here */ +``` + diff --git a/docs/models/listlibrariesresponse.md b/docs/models/listlibrariesresponse.md new file mode 100644 index 00000000..e21b9ced --- /dev/null +++ b/docs/models/listlibrariesresponse.md @@ -0,0 +1,8 @@ +# ListLibrariesResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `data` | List[[models.Library](../models/library.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listlibraryout.md b/docs/models/listlibraryout.md deleted file mode 100644 index db76ffa1..00000000 --- a/docs/models/listlibraryout.md +++ /dev/null @@ -1,8 +0,0 @@ -# ListLibraryOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `data` | List[[models.LibraryOut](../models/libraryout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md index 50795f0e..05617850 100644 --- a/docs/models/messageinputcontentchunks.md +++ 
b/docs/models/messageinputcontentchunks.md @@ -27,3 +27,9 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` +### `models.ConversationThinkChunk` + +```python +value: models.ConversationThinkChunk = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index d55eb876..f8514fb3 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,13 +5,13 @@ Representation of an input message inside the conversation. ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | 
------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["message.input"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.Role](../models/role.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentryrole.md b/docs/models/messageinputentryrole.md deleted file mode 100644 index f2fdc71d..00000000 --- a/docs/models/messageinputentryrole.md +++ /dev/null @@ -1,9 +0,0 @@ -# MessageInputEntryRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | -| `USER` | user | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index 5dc74a89..c4a7777e 100644 --- a/docs/models/messageoutputcontentchunks.md +++ 
b/docs/models/messageoutputcontentchunks.md @@ -27,6 +27,12 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` +### `models.ConversationThinkChunk` + +```python +value: models.ConversationThinkChunk = /* values here */ +``` + ### `models.ToolReferenceChunk` ```python diff --git a/docs/models/messageoutputentry.md b/docs/models/messageoutputentry.md index 5b42e20d..73a1c666 100644 --- a/docs/models/messageoutputentry.md +++ b/docs/models/messageoutputentry.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `object` | [Optional[models.MessageOutputEntryObject]](../models/messageoutputentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageOutputEntryType]](../models/messageoutputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | -| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | 
+| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["message.output"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputentryobject.md b/docs/models/messageoutputentryobject.md deleted file mode 100644 index bb254c82..00000000 --- a/docs/models/messageoutputentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageoutputentryrole.md b/docs/models/messageoutputentryrole.md deleted file mode 100644 index 783ee0aa..00000000 --- a/docs/models/messageoutputentryrole.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputentrytype.md 
b/docs/models/messageoutputentrytype.md deleted file mode 100644 index cb4a7a1b..00000000 --- a/docs/models/messageoutputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `MESSAGE_OUTPUT` | message.output | \ No newline at end of file diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md index 92c1c615..e09a965f 100644 --- a/docs/models/messageoutputevent.md +++ b/docs/models/messageoutputevent.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.MessageOutputEventType]](../models/messageoutputeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | -| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | 
-------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputeventrole.md b/docs/models/messageoutputeventrole.md deleted file mode 100644 index e38c6472..00000000 --- a/docs/models/messageoutputeventrole.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEventRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputeventtype.md b/docs/models/messageoutputeventtype.md deleted file mode 100644 index 1f43fdcc..00000000 --- a/docs/models/messageoutputeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEventType - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `MESSAGE_OUTPUT_DELTA` | message.output.delta | \ No newline at end of file diff --git a/docs/models/messages.md b/docs/models/messages.md deleted file mode 100644 index 1d394500..00000000 --- a/docs/models/messages.md +++ /dev/null @@ -1,29 +0,0 @@ -# Messages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ 
-``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/metadata.md b/docs/models/metadata.md deleted file mode 100644 index e655f580..00000000 --- a/docs/models/metadata.md +++ /dev/null @@ -1,7 +0,0 @@ -# Metadata - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/metric.md b/docs/models/metric.md new file mode 100644 index 00000000..7f863036 --- /dev/null +++ b/docs/models/metric.md @@ -0,0 +1,12 @@ +# Metric + +Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `train_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_mean_token_accuracy` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/metricout.md b/docs/models/metricout.md deleted file mode 100644 index 3c552bac..00000000 --- a/docs/models/metricout.md +++ /dev/null @@ -1,12 +0,0 @@ -# MetricOut - -Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). 
- - -## Fields - -| Field | Type | Required | Description | -| --------------------------- | --------------------------- | --------------------------- | --------------------------- | -| `train_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `valid_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `valid_mean_token_accuracy` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/mistralpromptmode.md b/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/docs/models/mistralpromptmode.md +++ b/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. + ## Values diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 36b27938..c7dd2710 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -3,11 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------- | --------------------- | --------------------- | --------------------- | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | 
:heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio_transcription` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index f7f61a79..af2e5c61 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -3,15 +3,16 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.ModelConversationTools](../models/modelconversationtools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| -| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationobject.md b/docs/models/modelconversationobject.md deleted file mode 100644 index ead1fa26..00000000 --- a/docs/models/modelconversationobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ModelConversationObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/modelconversationtool.md b/docs/models/modelconversationtool.md new file mode 100644 index 00000000..87235567 --- /dev/null +++ b/docs/models/modelconversationtool.md @@ -0,0 +1,41 @@ +# ModelConversationTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/modelconversationtools.md b/docs/models/modelconversationtools.md deleted file mode 100644 index 5cc97437..00000000 --- a/docs/models/modelconversationtools.md +++ /dev/null @@ -1,41 
+0,0 @@ -# ModelConversationTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/modellist.md b/docs/models/modellist.md index 760882c6..85b20be7 100644 --- a/docs/models/modellist.md +++ b/docs/models/modellist.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `data` | List[[models.Data](../models/data.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.ModelListData](../models/modellistdata.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modellistdata.md b/docs/models/modellistdata.md new file mode 100644 index 00000000..b44e84a0 --- /dev/null +++ b/docs/models/modellistdata.md @@ -0,0 +1,17 @@ +# ModelListData + + +## Supported Types + +### `models.BaseModelCard` + +```python +value: 
models.BaseModelCard = /* values here */ +``` + +### `models.FTModelCard` + +```python +value: models.FTModelCard = /* values here */ +``` + diff --git a/docs/models/modeltype.md b/docs/models/modeltype.md deleted file mode 100644 index a31c3ca0..00000000 --- a/docs/models/modeltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ModelType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/multipartbodyparams.md b/docs/models/multipartbodyparams.md new file mode 100644 index 00000000..f14b9573 --- /dev/null +++ b/docs/models/multipartbodyparams.md @@ -0,0 +1,9 @@ +# MultiPartBodyParams + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `purpose` | 
[Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/object.md b/docs/models/object.md deleted file mode 100644 index 0122c0db..00000000 --- a/docs/models/object.md +++ /dev/null @@ -1,8 +0,0 @@ -# Object - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/ocrpageobject.md b/docs/models/ocrpageobject.md index 9db3bb77..02473d44 100644 --- a/docs/models/ocrpageobject.md +++ b/docs/models/ocrpageobject.md @@ -8,4 +8,8 @@ | `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | | `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | | `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | | `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 6a9c77ab..dd3fc2ea 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -3,14 +3,18 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.DocumentUnion](../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/ocrtableobject.md b/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/docs/models/one.md b/docs/models/one.md deleted file mode 100644 index 3de496a6..00000000 --- a/docs/models/one.md +++ /dev/null @@ -1,29 +0,0 @@ -# One - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/orderby.md b/docs/models/orderby.md new file mode 100644 index 00000000..bba50df1 --- /dev/null +++ b/docs/models/orderby.md @@ -0,0 +1,9 @@ +# 
OrderBy + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `CREATED` | created | +| `MINUS_CREATED` | -created | \ No newline at end of file diff --git a/docs/models/output.md b/docs/models/output.md new file mode 100644 index 00000000..d0ee0db9 --- /dev/null +++ b/docs/models/output.md @@ -0,0 +1,29 @@ +# Output + + +## Supported Types + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md index 2da475f7..e5185014 100644 --- a/docs/models/outputcontentchunks.md +++ b/docs/models/outputcontentchunks.md @@ -27,6 +27,12 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` +### `models.ConversationThinkChunk` + +```python +value: models.ConversationThinkChunk = /* values here */ +``` + ### `models.ToolReferenceChunk` ```python diff --git a/docs/models/outputs.md b/docs/models/outputs.md deleted file mode 100644 index 7756c627..00000000 --- a/docs/models/outputs.md +++ /dev/null @@ -1,29 +0,0 @@ -# Outputs - - -## Supported Types - -### `models.MessageOutputEntry` - -```python -value: models.MessageOutputEntry = /* values here */ -``` - -### `models.ToolExecutionEntry` - -```python -value: models.ToolExecutionEntry = /* values here */ -``` - -### `models.FunctionCallEntry` - -```python -value: models.FunctionCallEntry = /* values here */ -``` - -### `models.AgentHandoffEntry` - -```python -value: models.AgentHandoffEntry = /* values here */ -``` - diff --git a/docs/models/prediction.md b/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 
--- a/docs/models/prediction.md +++ b/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + ## Fields diff --git a/docs/models/queryparamstatus.md b/docs/models/queryparamstatus.md deleted file mode 100644 index dcd20908..00000000 --- a/docs/models/queryparamstatus.md +++ /dev/null @@ -1,19 +0,0 @@ -# QueryParamStatus - -The current job state to filter on. When set, the other results are not displayed. - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `QUEUED` | QUEUED | -| `STARTED` | STARTED | -| `VALIDATING` | VALIDATING | -| `VALIDATED` | VALIDATED | -| `RUNNING` | RUNNING | -| `FAILED_VALIDATION` | FAILED_VALIDATION | -| `FAILED` | FAILED | -| `SUCCESS` | SUCCESS | -| `CANCELLED` | CANCELLED | -| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerror.md b/docs/models/realtimetranscriptionerror.md new file mode 100644 index 00000000..e01f2126 --- /dev/null +++ b/docs/models/realtimetranscriptionerror.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionError + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["error"]]* | :heavy_minus_sign: | N/A | +| `error` | [models.RealtimeTranscriptionErrorDetail](../models/realtimetranscriptionerrordetail.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerrordetail.md b/docs/models/realtimetranscriptionerrordetail.md new file mode 100644 
index 00000000..5b34755d --- /dev/null +++ b/docs/models/realtimetranscriptionerrordetail.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionErrorDetail + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | +| `message` | [models.RealtimeTranscriptionErrorDetailMessage](../models/realtimetranscriptionerrordetailmessage.md) | :heavy_check_mark: | Human-readable error message. | +| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerrordetailmessage.md b/docs/models/realtimetranscriptionerrordetailmessage.md new file mode 100644 index 00000000..da3764ef --- /dev/null +++ b/docs/models/realtimetranscriptionerrordetailmessage.md @@ -0,0 +1,19 @@ +# RealtimeTranscriptionErrorDetailMessage + +Human-readable error message. 
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + diff --git a/docs/models/realtimetranscriptioninputaudioappend.md b/docs/models/realtimetranscriptioninputaudioappend.md new file mode 100644 index 00000000..5ee365eb --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioappend.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionInputAudioAppend + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["input_audio.append"]]* | :heavy_minus_sign: | N/A | +| `audio` | *str* | :heavy_check_mark: | Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes. 
| \ No newline at end of file diff --git a/docs/models/realtimetranscriptioninputaudioend.md b/docs/models/realtimetranscriptioninputaudioend.md new file mode 100644 index 00000000..393d208c --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioend.md @@ -0,0 +1,8 @@ +# RealtimeTranscriptionInputAudioEnd + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `type` | *Optional[Literal["input_audio.end"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptioninputaudioflush.md b/docs/models/realtimetranscriptioninputaudioflush.md new file mode 100644 index 00000000..367725ba --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioflush.md @@ -0,0 +1,8 @@ +# RealtimeTranscriptionInputAudioFlush + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `type` | *Optional[Literal["input_audio.flush"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsession.md b/docs/models/realtimetranscriptionsession.md new file mode 100644 index 00000000..750bd7f7 --- /dev/null +++ b/docs/models/realtimetranscriptionsession.md @@ -0,0 +1,11 @@ +# RealtimeTranscriptionSession + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `request_id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | 
N/A | +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessioncreated.md b/docs/models/realtimetranscriptionsessioncreated.md new file mode 100644 index 00000000..34e603fd --- /dev/null +++ b/docs/models/realtimetranscriptionsessioncreated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionCreated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.created"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdated.md b/docs/models/realtimetranscriptionsessionupdated.md new file mode 100644 index 00000000..7e271995 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.updated"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/realtimetranscriptionsessionupdatemessage.md b/docs/models/realtimetranscriptionsessionupdatemessage.md new file mode 100644 index 00000000..2a50ca92 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdatemessage.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdateMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.update"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSessionUpdatePayload](../models/realtimetranscriptionsessionupdatepayload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdatepayload.md b/docs/models/realtimetranscriptionsessionupdatepayload.md new file mode 100644 index 00000000..d6c6547d --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdatepayload.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdatePayload + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `audio_format` | [OptionalNullable[models.AudioFormat]](../models/audioformat.md) | :heavy_minus_sign: | Set before sending audio. Audio format updates are rejected after audio starts. 
| +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | Set before sending audio. Streaming delay updates are rejected after audio starts. | \ No newline at end of file diff --git a/docs/models/referencechunk.md b/docs/models/referencechunk.md index a132ca2f..d847e248 100644 --- a/docs/models/referencechunk.md +++ b/docs/models/referencechunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/referencechunktype.md b/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/docs/models/repositories.md b/docs/models/repositories.md deleted file mode 100644 index 02274e3d..00000000 --- a/docs/models/repositories.md +++ /dev/null @@ -1,11 +0,0 @@ -# Repositories - - -## Supported Types - -### `models.GithubRepositoryOut` - -```python -value: models.GithubRepositoryOut = /* values here */ -``` - diff --git 
a/docs/models/requestsource.md b/docs/models/requestsource.md new file mode 100644 index 00000000..c81c1159 --- /dev/null +++ b/docs/models/requestsource.md @@ -0,0 +1,10 @@ +# RequestSource + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `API` | api | +| `PLAYGROUND` | playground | +| `AGENT_BUILDER_V1` | agent_builder_v1 | \ No newline at end of file diff --git a/docs/models/response.md b/docs/models/response.md new file mode 100644 index 00000000..ff679257 --- /dev/null +++ b/docs/models/response.md @@ -0,0 +1,17 @@ +# Response + + +## Supported Types + +### `models.ClassifierFineTuningJob` + +```python +value: models.ClassifierFineTuningJob = /* values here */ +``` + +### `models.CompletionFineTuningJob` + +```python +value: models.CompletionFineTuningJob = /* values here */ +``` + diff --git a/docs/models/response1.md b/docs/models/response1.md deleted file mode 100644 index 2e73fdbb..00000000 --- a/docs/models/response1.md +++ /dev/null @@ -1,17 +0,0 @@ -# Response1 - - -## Supported Types - -### `models.ClassifierJobOut` - -```python -value: models.ClassifierJobOut = /* values here */ -``` - -### `models.CompletionJobOut` - -```python -value: models.CompletionJobOut = /* values here */ -``` - diff --git a/docs/models/responsebody.md b/docs/models/responsebody.md deleted file mode 100644 index 8a218517..00000000 --- a/docs/models/responsebody.md +++ /dev/null @@ -1,17 +0,0 @@ -# ResponseBody - - -## Supported Types - -### `models.ModelConversation` - -```python -value: models.ModelConversation = /* values here */ -``` - -### `models.AgentConversation` - -```python -value: models.AgentConversation = /* values here */ -``` - diff --git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md index ec25bd6d..63d4cc06 100644 --- a/docs/models/responsedoneevent.md +++ b/docs/models/responsedoneevent.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| 
---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseDoneEventType]](../models/responsedoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsedoneeventtype.md b/docs/models/responsedoneeventtype.md deleted file mode 100644 index 58f7f44d..00000000 --- a/docs/models/responsedoneeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseDoneEventType - - -## Values - -| Name | Value | -| ---------------------------- | ---------------------------- | -| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md index 2ea6a2e0..4309bdad 100644 --- 
a/docs/models/responseerrorevent.md +++ b/docs/models/responseerrorevent.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.ResponseErrorEventType]](../models/responseerroreventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `message` | *str* | :heavy_check_mark: | N/A | -| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.error"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responseerroreventtype.md b/docs/models/responseerroreventtype.md deleted file mode 100644 index 3b3fc303..00000000 --- a/docs/models/responseerroreventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseErrorEventType - - -## Values - -| Name | Value | -| ----------------------------- | ----------------------------- | -| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | \ No newline at end 
of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | 
[Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/docs/models/responseformats.md +++ b/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- ## Values diff --git a/docs/models/responseretrievemodelv1modelsmodelidget.md b/docs/models/responseretrievemodelv1modelsmodelidget.md new file mode 100644 index 00000000..ffbc1473 --- /dev/null +++ b/docs/models/responseretrievemodelv1modelsmodelidget.md @@ -0,0 +1,19 @@ +# ResponseRetrieveModelV1ModelsModelIDGet + +Successful Response + + +## Supported Types + +### `models.BaseModelCard` + +```python +value: models.BaseModelCard = /* values here */ +``` + +### `models.FTModelCard` + +```python +value: models.FTModelCard = /* values here */ +``` + diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md index 481bd5bb..e2f421af 100644 --- a/docs/models/responsestartedevent.md +++ b/docs/models/responsestartedevent.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseStartedEventType]](../models/responsestartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.started"]* | :heavy_check_mark: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsestartedeventtype.md b/docs/models/responsestartedeventtype.md deleted file mode 100644 index 2d9273bd..00000000 --- a/docs/models/responsestartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseStartedEventType - - -## Values - -| Name | Value | -| ------------------------------- | ------------------------------- | -| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | \ No newline at end of file diff --git a/docs/models/responsev1conversationsget.md b/docs/models/responsev1conversationsget.md new file mode 100644 index 00000000..844c5d61 --- /dev/null +++ b/docs/models/responsev1conversationsget.md @@ -0,0 +1,19 @@ +# ResponseV1ConversationsGet + +Successful Response + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md deleted file mode 100644 index 28f97dd2..00000000 --- a/docs/models/retrievefileout.md +++ /dev/null @@ -1,19 +0,0 @@ -# RetrieveFileOut - - -## Fields - -| Field | Type | Required | Description | Example | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". 
| file | -| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md b/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md deleted file mode 100644 index 3ac96521..00000000 --- a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md +++ /dev/null @@ -1,19 +0,0 @@ -# RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet - -Successful Response - - -## Supported Types - -### `models.BaseModelCard` - -```python -value: models.BaseModelCard = /* values here */ -``` - -### `models.FTModelCard` - -```python -value: models.FTModelCard = /* values here */ -``` - diff --git a/docs/models/role.md b/docs/models/role.md index affca78d..853c6257 100644 --- a/docs/models/role.md +++ b/docs/models/role.md @@ -3,6 +3,7 @@ ## Values -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/sharingdelete.md 
b/docs/models/sharingdelete.md index 71cacab6..1dcec095 100644 --- a/docs/models/sharingdelete.md +++ b/docs/models/sharingdelete.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `org_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | \ No newline at end of file diff --git a/docs/models/sharingin.md b/docs/models/sharingin.md index 537ede03..bac18c8d 100644 --- a/docs/models/sharingin.md +++ b/docs/models/sharingin.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `org_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `level` | [models.ShareEnum](../models/shareenum.md) | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. 
| \ No newline at end of file diff --git a/docs/models/sharingout.md b/docs/models/sharingout.md index 5844fe64..35aeff43 100644 --- a/docs/models/sharingout.md +++ b/docs/models/sharingout.md @@ -10,4 +10,4 @@ | `org_id` | *str* | :heavy_check_mark: | N/A | | `role` | *str* | :heavy_check_mark: | N/A | | `share_with_type` | *str* | :heavy_check_mark: | N/A | -| `share_with_uuid` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `share_with_uuid` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/status.md b/docs/models/status.md deleted file mode 100644 index 5e22eb73..00000000 --- a/docs/models/status.md +++ /dev/null @@ -1,19 +0,0 @@ -# Status - -The current status of the fine-tuning job. - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `QUEUED` | QUEUED | -| `STARTED` | STARTED | -| `VALIDATING` | VALIDATING | -| `VALIDATED` | VALIDATED | -| `RUNNING` | RUNNING | -| `FAILED_VALIDATION` | FAILED_VALIDATION | -| `FAILED` | FAILED | -| `SUCCESS` | SUCCESS | -| `CANCELLED` | CANCELLED | -| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/stop.md b/docs/models/stop.md deleted file mode 100644 index ba40ca83..00000000 --- a/docs/models/stop.md +++ /dev/null @@ -1,19 +0,0 @@ -# Stop - -Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[str]` - -```python -value: List[str] = /* values here */ -``` - diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index 0dba71c0..10bda10f 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/systemmessagecontent.md b/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/docs/models/systemmessagecontent.md +++ b/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/docs/models/systemmessagecontentchunks.md b/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### 
`models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/tableformat.md b/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md index d488cb51..df0e61c3 100644 --- a/docs/models/textchunk.md +++ b/docs/models/textchunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TextChunkType]](../models/textchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["text"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/textchunktype.md b/docs/models/textchunktype.md deleted file mode 100644 index e2a2ae8b..00000000 --- a/docs/models/textchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TextChunkType - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md index 66b2e0cd..70c0369f 100644 --- a/docs/models/thinkchunk.md +++ b/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | -| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.ThinkChunkThinking](../models/thinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/docs/models/thinkchunkthinking.md b/docs/models/thinkchunkthinking.md new file mode 100644 index 00000000..dd1ecca1 --- /dev/null +++ b/docs/models/thinkchunkthinking.md @@ -0,0 +1,17 @@ +# ThinkChunkThinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/docs/models/thinkchunktype.md b/docs/models/thinkchunktype.md deleted file mode 100644 index baf6f755..00000000 --- a/docs/models/thinkchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ThinkChunkType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `THINKING` | thinking | \ No newline at end of file diff --git a/docs/models/timestampgranularity.md b/docs/models/timestampgranularity.md index 0d2a8054..d20012ea 100644 --- a/docs/models/timestampgranularity.md +++ b/docs/models/timestampgranularity.md @@ -5,4 +5,5 @@ | Name | Value | | --------- | --------- | -| `SEGMENT` | segment | \ 
No newline at end of file +| `SEGMENT` | segment | +| `WORD` | word | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 43e09050..3819236b 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -3,10 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolcallconfirmation.md b/docs/models/toolcallconfirmation.md new file mode 100644 index 00000000..1812f7d6 --- /dev/null +++ b/docs/models/toolcallconfirmation.md @@ -0,0 +1,9 @@ +# ToolCallConfirmation + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | 
------------------------------------------------ | ------------------------------------------------ | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `confirmation` | [models.Confirmation](../models/confirmation.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolconfiguration.md b/docs/models/toolconfiguration.md new file mode 100644 index 00000000..89286a17 --- /dev/null +++ b/docs/models/toolconfiguration.md @@ -0,0 +1,10 @@ +# ToolConfiguration + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `exclude` | List[*str*] | :heavy_minus_sign: | N/A | +| `include` | List[*str*] | :heavy_minus_sign: | N/A | +| `requires_confirmation` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md index bfc9dc0e..7066f348 100644 --- a/docs/models/toolexecutiondeltaevent.md +++ b/docs/models/toolexecutiondeltaevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ToolExecutionDeltaEventType]](../models/toolexecutiondeltaeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | 
[models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `type` | *Literal["tool.execution.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaeventname.md b/docs/models/toolexecutiondeltaeventname.md new file mode 100644 index 00000000..9c3edef8 --- /dev/null +++ b/docs/models/toolexecutiondeltaeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDeltaEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutiondeltaeventtype.md b/docs/models/toolexecutiondeltaeventtype.md deleted file mode 100644 index a4a2f8cc..00000000 --- a/docs/models/toolexecutiondeltaeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionDeltaEventType - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `TOOL_EXECUTION_DELTA` | tool.execution.delta | \ No newline at end of file diff --git 
a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md index aa28df59..b2d81be3 100644 --- a/docs/models/toolexecutiondoneevent.md +++ b/docs/models/toolexecutiondoneevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ToolExecutionDoneEventType]](../models/toolexecutiondoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | -| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | *Literal["tool.execution.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | 
[models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventname.md b/docs/models/toolexecutiondoneeventname.md new file mode 100644 index 00000000..6449079d --- /dev/null +++ b/docs/models/toolexecutiondoneeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDoneEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutiondoneeventtype.md b/docs/models/toolexecutiondoneeventtype.md deleted file mode 100644 index 872624c1..00000000 --- a/docs/models/toolexecutiondoneeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionDoneEventType - - -## Values - -| Name | Value | -| --------------------- | --------------------- | -| `TOOL_EXECUTION_DONE` | tool.execution.done | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index 174abdd1..03316381 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -3,13 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `object` | [Optional[models.ToolExecutionEntryObject]](../models/toolexecutionentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolExecutionEntryType]](../models/toolexecutionentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | -| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["tool.execution"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentryname.md b/docs/models/toolexecutionentryname.md new file mode 100644 index 00000000..fb762a53 --- /dev/null +++ 
b/docs/models/toolexecutionentryname.md @@ -0,0 +1,17 @@ +# ToolExecutionEntryName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutionentryobject.md b/docs/models/toolexecutionentryobject.md deleted file mode 100644 index 0ca79af5..00000000 --- a/docs/models/toolexecutionentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/toolexecutionentrytype.md b/docs/models/toolexecutionentrytype.md deleted file mode 100644 index a67629b8..00000000 --- a/docs/models/toolexecutionentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionEntryType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `TOOL_EXECUTION` | tool.execution | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index 82ea65e5..189b8a3d 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -3,11 +3,13 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ToolExecutionStartedEventType]](../models/toolexecutionstartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| 
`output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `type` | *Literal["tool.execution.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedeventname.md b/docs/models/toolexecutionstartedeventname.md new file mode 100644 index 00000000..3308c483 --- /dev/null +++ b/docs/models/toolexecutionstartedeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionStartedEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutionstartedeventtype.md b/docs/models/toolexecutionstartedeventtype.md deleted file mode 100644 index 56695d1f..00000000 --- 
a/docs/models/toolexecutionstartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionStartedEventType - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `TOOL_EXECUTION_STARTED` | tool.execution.started | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md index f1b54c7c..d6002175 100644 --- a/docs/models/toolfilechunk.md +++ b/docs/models/toolfilechunk.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | -| `file_id` | *str* | :heavy_check_mark: | N/A | -| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `type` | *Optional[Literal["tool_file"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktool.md 
b/docs/models/toolfilechunktool.md new file mode 100644 index 00000000..aa5ac8a9 --- /dev/null +++ b/docs/models/toolfilechunktool.md @@ -0,0 +1,17 @@ +# ToolFileChunkTool + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolfilechunktype.md b/docs/models/toolfilechunktype.md deleted file mode 100644 index 7e99acef..00000000 --- a/docs/models/toolfilechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolFileChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `TOOL_FILE` | tool_file | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md index a54f4933..7201481e 100644 --- a/docs/models/toolmessage.md +++ b/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolmessagerole.md b/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| 
Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md index af447aee..49ea4ca7 100644 --- a/docs/models/toolreferencechunk.md +++ b/docs/models/toolreferencechunk.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | -| `title` | *str* | :heavy_check_mark: | N/A | -| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Optional[Literal["tool_reference"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | +| `title` | *str* | :heavy_check_mark: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/toolreferencechunktool.md b/docs/models/toolreferencechunktool.md new file mode 100644 index 00000000..999f7c34 --- /dev/null +++ b/docs/models/toolreferencechunktool.md @@ -0,0 +1,17 @@ +# ToolReferenceChunkTool + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolreferencechunktype.md b/docs/models/toolreferencechunktype.md deleted file mode 100644 index bc57d277..00000000 --- a/docs/models/toolreferencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolReferenceChunkType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `TOOL_REFERENCE` | tool_reference | \ No newline at end of file diff --git a/docs/models/tools.md b/docs/models/tools.md deleted file mode 100644 index f308d732..00000000 --- a/docs/models/tools.md +++ /dev/null @@ -1,41 +0,0 @@ -# Tools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md index bebc9f72..d7672c0e 100644 --- a/docs/models/transcriptionsegmentchunk.md +++ b/docs/models/transcriptionsegmentchunk.md @@ -3,10 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | 
------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `start` | *float* | :heavy_check_mark: | N/A | -| `end` | *float* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `type` | *Optional[Literal["transcription_segment"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdone.md b/docs/models/transcriptionstreamdone.md index 9ecf7d9c..bca69a2b 100644 --- a/docs/models/transcriptionstreamdone.md +++ b/docs/models/transcriptionstreamdone.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `text` | *str* | :heavy_check_mark: | N/A | -| `segments` | 
List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | -| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TranscriptionStreamDoneType]](../models/transcriptionstreamdonetype.md) | :heavy_minus_sign: | N/A | -| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `type` | *Literal["transcription.done"]* | :heavy_check_mark: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdonetype.md b/docs/models/transcriptionstreamdonetype.md deleted file mode 100644 index db092c4f..00000000 --- a/docs/models/transcriptionstreamdonetype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamDoneType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `TRANSCRIPTION_DONE` | transcription.done | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguage.md b/docs/models/transcriptionstreamlanguage.md index e16c8fdc..63fcfbc6 100644 --- 
a/docs/models/transcriptionstreamlanguage.md +++ b/docs/models/transcriptionstreamlanguage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | -| `type` | [Optional[models.TranscriptionStreamLanguageType]](../models/transcriptionstreamlanguagetype.md) | :heavy_minus_sign: | N/A | -| `audio_language` | *str* | :heavy_check_mark: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Literal["transcription.language"]* | :heavy_check_mark: | N/A | +| `audio_language` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguagetype.md b/docs/models/transcriptionstreamlanguagetype.md deleted file mode 100644 index e93521e1..00000000 --- a/docs/models/transcriptionstreamlanguagetype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamLanguageType - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `TRANSCRIPTION_LANGUAGE` | transcription.language | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md index 3deeedf0..1b652a3b 100644 --- a/docs/models/transcriptionstreamsegmentdelta.md +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -3,10 
+3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | -| `start` | *float* | :heavy_check_mark: | N/A | -| `end` | *float* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | +| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdeltatype.md b/docs/models/transcriptionstreamsegmentdeltatype.md deleted file mode 100644 index 03ff3e8b..00000000 --- a/docs/models/transcriptionstreamsegmentdeltatype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamSegmentDeltaType - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `TRANSCRIPTION_SEGMENT` | transcription.segment | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md 
b/docs/models/transcriptionstreamtextdelta.md index adddfe18..77bd0ddc 100644 --- a/docs/models/transcriptionstreamtextdelta.md +++ b/docs/models/transcriptionstreamtextdelta.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TranscriptionStreamTextDeltaType]](../models/transcriptionstreamtextdeltatype.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------- | ------------------------------------- | ------------------------------------- | ------------------------------------- | +| `type` | *Literal["transcription.text.delta"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdeltatype.md b/docs/models/transcriptionstreamtextdeltatype.md deleted file mode 100644 index b7c9d675..00000000 --- a/docs/models/transcriptionstreamtextdeltatype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamTextDeltaType - - -## Values - -| Name | Value | -| -------------------------- | -------------------------- | -| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | \ No newline at end of file diff --git a/docs/models/two.md b/docs/models/two.md deleted file mode 100644 index 59dc2be2..00000000 --- a/docs/models/two.md +++ /dev/null @@ -1,29 +0,0 @@ -# 
Two - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/type.md b/docs/models/type.md deleted file mode 100644 index d05ead75..00000000 --- a/docs/models/type.md +++ /dev/null @@ -1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `TRANSCRIPTION_SEGMENT` | transcription_segment | \ No newline at end of file diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/unarchiveftmodelout.md deleted file mode 100644 index 287c9a00..00000000 --- a/docs/models/unarchiveftmodelout.md +++ /dev/null @@ -1,10 +0,0 @@ -# UnarchiveFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.UnarchiveFTModelOutObject]](../models/unarchiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchiveftmodeloutobject.md b/docs/models/unarchiveftmodeloutobject.md deleted file mode 100644 index 623dcec2..00000000 --- a/docs/models/unarchiveftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# UnarchiveFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No 
newline at end of file diff --git a/docs/models/unarchivemodelresponse.md b/docs/models/unarchivemodelresponse.md new file mode 100644 index 00000000..375962a7 --- /dev/null +++ b/docs/models/unarchivemodelresponse.md @@ -0,0 +1,10 @@ +# UnarchiveModelResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateagentrequest.md b/docs/models/updateagentrequest.md new file mode 100644 index 00000000..d3428d92 --- /dev/null +++ b/docs/models/updateagentrequest.md @@ -0,0 +1,17 @@ +# UpdateAgentRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.UpdateAgentRequestTool](../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateagentrequesttool.md b/docs/models/updateagentrequesttool.md new file mode 100644 index 00000000..e358b1ed --- /dev/null +++ b/docs/models/updateagentrequesttool.md @@ -0,0 +1,41 @@ +# UpdateAgentRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/updatedocumentrequest.md b/docs/models/updatedocumentrequest.md new file mode 100644 index 00000000..7e0b41b7 --- /dev/null +++ b/docs/models/updatedocumentrequest.md @@ -0,0 +1,9 @@ +# UpdateDocumentRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | 
------------------------------------------------------- | ------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateftmodelin.md b/docs/models/updateftmodelin.md deleted file mode 100644 index 4e55b1a7..00000000 --- a/docs/models/updateftmodelin.md +++ /dev/null @@ -1,9 +0,0 @@ -# UpdateFTModelIn - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatelibraryrequest.md b/docs/models/updatelibraryrequest.md new file mode 100644 index 00000000..aaffc5a9 --- /dev/null +++ b/docs/models/updatelibraryrequest.md @@ -0,0 +1,9 @@ +# UpdateLibraryRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatemodelrequest.md b/docs/models/updatemodelrequest.md new file mode 100644 index 00000000..56b84c59 --- /dev/null +++ b/docs/models/updatemodelrequest.md @@ -0,0 +1,9 @@ +# UpdateModelRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md deleted file mode 100644 index 6f09c9a6..00000000 --- a/docs/models/uploadfileout.md +++ /dev/null @@ -1,18 +0,0 @@ -# UploadFileOut - - -## Fields - -| Field | Type | Required | Description | Example | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 63b01310..e7a932ed 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | 
---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/usermessagerole.md b/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md index 199d2edd..c73952d9 100644 --- a/docs/models/wandbintegration.md +++ b/docs/models/wandbintegration.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `type` | [Optional[models.WandbIntegrationType]](../models/wandbintegrationtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A | | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | | `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. 
| diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md deleted file mode 100644 index cec02ed8..00000000 --- a/docs/models/wandbintegrationout.md +++ /dev/null @@ -1,12 +0,0 @@ -# WandbIntegrationOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | -| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | -| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationouttype.md b/docs/models/wandbintegrationouttype.md deleted file mode 100644 index 5a7533c9..00000000 --- a/docs/models/wandbintegrationouttype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WandbIntegrationOutType - - -## Values - -| Name | Value | -| ------- | ------- | -| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/models/wandbintegrationresult.md b/docs/models/wandbintegrationresult.md new file mode 100644 index 00000000..d12bc311 --- /dev/null +++ b/docs/models/wandbintegrationresult.md @@ -0,0 +1,12 @@ +# WandbIntegrationResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationtype.md b/docs/models/wandbintegrationtype.md deleted file mode 100644 index 4fdffe22..00000000 --- a/docs/models/wandbintegrationtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WandbIntegrationType - - -## Values - -| Name | Value | -| ------- | ------- | -| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md index 941fc2b8..78b736cd 100644 --- a/docs/models/websearchpremiumtool.md +++ b/docs/models/websearchpremiumtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `type` | [Optional[models.WebSearchPremiumToolType]](../models/websearchpremiumtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchpremiumtooltype.md b/docs/models/websearchpremiumtooltype.md deleted file mode 100644 index 348bfe85..00000000 --- a/docs/models/websearchpremiumtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WebSearchPremiumToolType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `WEB_SEARCH_PREMIUM` | web_search_premium | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md index c8d708bd..4ca7333c 100644 --- a/docs/models/websearchtool.md +++ b/docs/models/websearchtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `type` | [Optional[models.WebSearchToolType]](../models/websearchtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search"]* | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtooltype.md b/docs/models/websearchtooltype.md deleted file mode 100644 index 57b6acbb..00000000 --- a/docs/models/websearchtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WebSearchToolType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `WEB_SEARCH` | web_search | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 20484120..c50456df 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -1,8 +1,9 @@ -# Accesses -(*beta.libraries.accesses*) +# Beta.Libraries.Accesses ## Overview +(beta) Libraries API - manage access to a library. + ### Available Operations * [list](#list) - List all of the access to this library. @@ -15,8 +16,9 @@ Given a library, list all of the Entity that have access and to what level. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -46,8 +48,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update_or_create @@ -55,8 +57,9 @@ Given a library id, you can create or update the access level of an entity. 
You ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -64,7 +67,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", org_id="aadd9ae1-f285-4437-884a-091c77efa6fd", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") # Handle response print(res) @@ -76,10 +79,10 @@ with Mistral( | Parameter | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `library_id` | *str* | :heavy_check_mark: | N/A | -| `org_id` | *str* | :heavy_check_mark: | N/A | | `level` | [models.ShareEnum](../../models/shareenum.md) | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -90,8 +93,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -99,8 +102,9 @@ Given a library id, you can delete the access level of an entity. An owner canno ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -108,7 +112,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", org_id="0814a235-c2d0-4814-875a-4b85f93d3dc7", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") # Handle response print(res) @@ -120,9 +124,9 @@ with Mistral( | Parameter | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `library_id` | *str* | :heavy_check_mark: | N/A | -| `org_id` | *str* | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. 
| +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -133,5 +137,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 6bab08dd..8a608370 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -1,5 +1,4 @@ # Agents -(*agents*) ## Overview @@ -16,8 +15,9 @@ Agents Completion ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -27,10 +27,12 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -39,24 +41,25 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | 
List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessage](../../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -66,8 +69,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -75,8 +78,9 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -86,10 +90,12 @@ with Mistral( res = mistral.agents.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], agent_id="", stream=True) + ], agent_id="", stream=True, response_format={ + "type": "text", + }) with res as event_stream: for event in event_stream: @@ -100,24 +106,25 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -127,5 +134,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md deleted file mode 100644 index 2101c266..00000000 --- a/docs/sdks/audio/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Audio -(*audio*) - -## Overview - -### Available Operations diff --git a/docs/sdks/batch/README.md b/docs/sdks/batch/README.md deleted file mode 100644 index ec7d8340..00000000 --- a/docs/sdks/batch/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Batch -(*batch*) - -## Overview - -### Available Operations diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md new file mode 100644 index 00000000..3633fe4e --- /dev/null +++ b/docs/sdks/batchjobs/README.md @@ -0,0 +1,188 @@ +# Batch.Jobs + +## Overview + +### Available Operations + +* [list](#list) - Get Batch Jobs +* [create](#create) - Create Batch Job +* [get](#get) - Get Batch Job +* [cancel](#cancel) - Cancel Batch Job + +## list + +Get a list of batch jobs for your organization and user. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False, order_by="-created") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../../models/orderby.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListBatchJobsResponse](../../models/listbatchjobsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new batch job, it will be queued for processing. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `requests` | List[[models.BatchRequest](../../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + +### Response + +**[models.BatchJob](../../models/batchjob.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a batch job details by its UUID. + +Args: + inline: If True, return results inline in the response. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `inline` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.BatchJob](../../models/batchjob.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## cancel + +Request the cancellation of a batch job. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJob](../../models/batchjob.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/beta/README.md b/docs/sdks/beta/README.md deleted file mode 100644 index f5b5f822..00000000 --- a/docs/sdks/beta/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Beta -(*beta*) - -## Overview - -### Available Operations diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md new file mode 100644 index 00000000..aaa5110e --- /dev/null +++ b/docs/sdks/betaagents/README.md @@ -0,0 +1,501 @@ +# Beta.Agents + +## Overview + +(beta) Agents API + +### Available Operations + +* [create](#create) - Create an agent that can be used within a conversation.
+* [list](#list) - List agent entities. +* [get](#get) - Retrieve an agent entity. +* [update](#update) - Update an agent entity. +* [delete](#delete) - Delete an agent entity. +* [update_version](#update_version) - Update an agent version. +* [list_versions](#list_versions) - List all versions of an agent. +* [get_version](#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](#delete_version_alias) - Delete an agent version alias. + +## create + +Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of agent entities sorted by creation time. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list(page=0, page_size=20) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | 
Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default 
retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update an agent attributes and create a new version. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update(agent_id="", completion_args={ + "response_format": { + "type": "text", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.UpdateAgentRequestTool](../../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete an agent entity. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete(agent_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update_version + +Switch the version of an agent. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update_version(agent_id="", version=157995) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_versions + +Retrieve all versions for a specific agent with full agent context. Supports pagination. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_versions(agent_id="", page=0, page_size=20) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of versions per page | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get_version + +Get a specific agent version by version number. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get_version(agent_id="", version="788393") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create_version_alias + +Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.AgentAliasResponse](../../models/agentaliasresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_version_aliases + +Retrieve all version aliases for a specific agent. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_version_aliases(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.AgentAliasResponse]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete_version_alias + +Delete an existing alias for an agent. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete_version_alias(agent_id="", alias="") + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 7b467b58..1bf4aead 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -1,5 +1,4 @@ # Chat -(*chat*) ## Overview @@ -16,8 +15,9 @@ Chat Completion ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -25,12 +25,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -39,27 +41,28 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.ChatCompletionRequestMessage](../../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -69,8 +72,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -78,8 +81,9 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -87,12 +91,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.stream(model="mistral-small-latest", messages=[ + res = mistral.chat.stream(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, - ], stream=True) + ], stream=True, response_format={ + "type": "text", + }) with res as event_stream: for event in event_stream: @@ -103,27 +109,28 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -133,5 +140,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 3f9d3a3c..dc0f4984 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -1,5 +1,4 @@ # Classifiers -(*classifiers*) ## Overview @@ -18,8 +17,9 @@ Moderations ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -27,10 +27,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate(model="Durango", inputs=[ - "", - "", - ]) + res = mistral.classifiers.moderate(model="mistral-moderation-latest", inputs="") # Handle response print(res) @@ -39,11 +36,12 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -53,8 +51,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## moderate_chat @@ -62,8 +60,9 @@ Chat Moderations ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -73,8 +72,8 @@ with Mistral( res = mistral.classifiers.moderate_chat(inputs=[ { - "content": "", "role": "tool", + "content": "", }, ], model="LeBaron") @@ -85,11 +84,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -99,8 +98,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## classify @@ -108,8 +107,9 @@ Classifications ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -117,7 +117,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify(model="Silverado", inputs=[ + res = mistral.classifiers.classify(model="mistral-moderation-latest", inputs=[ "", ]) @@ -128,11 +128,12 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | 
:heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -142,8 +143,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## classify_chat @@ -151,8 +152,9 @@ Chat Classifications ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -160,12 +162,12 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify_chat(model="Camry", inputs=[ + res = mistral.classifiers.classify_chat(model="Camry", input=[ { "messages": [ { - "content": "", "role": "system", + "content": "", }, ], }, @@ -181,7 +183,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | +| `input` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -192,5 +194,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 2eceb451..e77d329b 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -1,5 +1,4 @@ -# Conversations -(*beta.conversations*) +# Beta.Conversations ## Overview @@ -10,6 +9,7 @@ * [start](#start) - Create a conversation and append entries to it. * [list](#list) - List all created conversations. * [get](#get) - Retrieve a conversation information. +* [delete](#delete) - Delete a conversation. * [append](#append) - Append new entries to an existing conversation. * [get_history](#get_history) - Retrieve all entries in a conversation. * [get_messages](#get_messages) - Retrieve all messages in a conversation. @@ -24,8 +24,9 @@ Create a new conversation, using a base model or an agent and append entries. 
Co ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -33,7 +34,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start(inputs="", stream=False) + res = mistral.beta.conversations.start(inputs="", stream=False, completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -42,20 +47,22 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | N/A | -| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -65,8 +72,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -74,8 +81,9 @@ Retrieve a list of conversation entities sorted by creation time. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -96,18 +104,19 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[List[models.ResponseBody]](../../models/.md)** +**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -115,8 +124,9 @@ Given a conversation_id retrieve a conversation entity with its attributes. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -140,14 +150,50 @@ with Mistral( ### Response -**[models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet](../../models/agentsapiv1conversationsgetresponsev1conversationsget.md)** +**[models.ResponseV1ConversationsGet](../../models/responsev1conversationsget.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a conversation given a conversation_id. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.conversations.delete(conversation_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## append @@ -155,8 +201,9 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -164,7 +211,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server") + res = mistral.beta.conversations.append(conversation_id="", stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -176,11 +227,12 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -191,8 +243,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_history @@ -200,8 +252,9 @@ Given a conversation_id retrieve all the entries belonging to that conversation. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -231,8 +284,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_messages @@ -240,8 +293,9 @@ Given a conversation_id retrieve all the messages belonging to that conversation ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -271,8 +325,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## restart @@ -280,8 +334,9 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -289,7 +344,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server") + res = mistral.beta.conversations.restart(conversation_id="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -301,12 +360,14 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -317,8 +378,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## start_stream @@ -326,8 +387,9 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -342,7 +404,11 @@ with Mistral( "tool_call_id": "", "result": "", }, - ], stream=True) + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -360,11 +426,13 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTool](../../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -376,8 +444,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## append_stream @@ -385,8 +453,9 @@ Run completion on the history of the conversation and the user entries. 
Return t ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -394,7 +463,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server") + res = mistral.beta.conversations.append_stream(conversation_id="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -408,11 +481,12 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -423,8 +497,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## restart_stream @@ -432,8 +506,9 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -441,15 +516,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart_stream(conversation_id="", inputs=[ - { - "object": "entry", - "type": "message.input", - "role": "assistant", - "content": "", - "prefix": False, + res = mistral.beta.conversations.restart_stream(conversation_id="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", }, - ], from_entry_id="", stream=True, store=True, handoff_execution="server") + }) with res as event_stream: for event in event_stream: @@ -463,12 +534,14 @@ with Mistral( | Parameter | Type | Required | Description | | 
------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -479,5 +552,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 05ae6f74..9c219b67 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -1,11 +1,12 @@ -# Documents -(*beta.libraries.documents*) +# Beta.Libraries.Documents ## Overview +(beta) Libraries API - manage documents in a library. + ### Available Operations -* [list](#list) - List document in a given library. +* [list](#list) - List documents in a given library. * [upload](#upload) - Upload a new document. * [get](#get) - Retrieve the metadata of a specific document. * [update](#update) - Update the metadata of a specific document. @@ -22,8 +23,9 @@ Given a library, lists the document that have been uploaded to that library. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -46,20 +48,21 @@ with Mistral( | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | | `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response -**[models.ListDocumentOut](../../models/listdocumentout.md)** +**[models.ListDocumentsResponse](../../models/listdocumentsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## upload @@ -67,8 +70,9 @@ Given a library, upload a new document to that library. It is queued for process ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -96,14 +100,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -111,8 +115,9 @@ Given a library and a document in this library, you can retrieve the metadata of ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -137,14 +142,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -152,8 +157,9 @@ Given a library and a document in that library, update the 
name of that document ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -175,18 +181,19 @@ with Mistral( | `library_id` | *str* | :heavy_check_mark: | N/A | | `document_id` | *str* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../../models/attributes.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -194,8 +201,9 @@ Given a library and a document in that library, delete that document. 
The docume ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -221,8 +229,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## text_content @@ -230,8 +238,9 @@ Given a library and a document in that library, you can retrieve the text conten ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -262,8 +271,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## status @@ -271,8 +280,9 @@ Given a library and a document in that library, retrieve the processing status o ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -303,8 +313,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_signed_url @@ -312,8 +322,9 @@ Given a library and a document in that library, retrieve the signed URL of a spe ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -338,14 +349,14 @@ with Mistral( ### Response -**[str](../../models/.md)** 
+**[str](../../models/responselibrariesdocumentsgetsignedurlv1.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## extracted_text_signed_url @@ -353,8 +364,9 @@ Given a library and a document in that library, retrieve the signed URL of text ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -379,14 +391,14 @@ with Mistral( ### Response -**[str](../../models/.md)** +**[str](../../models/responselibrariesdocumentsgetextractedtextsignedurlv1.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## reprocess @@ -394,8 +406,9 @@ Given a library and a document in that library, reprocess that document, it will ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -421,5 +434,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 91e33138..eecb5c9e 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -1,5 +1,4 @@ # Embeddings 
-(*embeddings*) ## Overview @@ -15,8 +14,9 @@ Embeddings ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -36,13 +36,15 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -52,5 +54,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index bc39f2e4..9507326b 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -1,5 +1,4 @@ # Files -(*files*) ## Overview @@ -24,8 +23,9 @@ Please contact us if you need to increase these storage limits. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -53,13 +53,13 @@ with Mistral( ### Response -**[models.UploadFileOut](../../models/uploadfileout.md)** +**[models.CreateFileResponse](../../models/createfileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -67,8 +67,9 @@ Returns a list of files that belong to the user's organization. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -76,7 +77,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list(page=0, page_size=100) + res = mistral.files.list(page=0, page_size=100, include_total=True) # Handle response print(res) @@ -89,21 +90,23 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `sample_type` | List[[models.SampleType](../../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `purpose` | [OptionalNullable[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[models.ListFilesOut](../../models/listfilesout.md)** +**[models.ListFilesResponse](../../models/listfilesresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -111,8 +114,9 @@ Returns information about a specific file. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -136,13 +140,13 @@ with Mistral( ### Response -**[models.RetrieveFileOut](../../models/retrievefileout.md)** +**[models.GetFileResponse](../../models/getfileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -150,8 +154,9 @@ Delete a file. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -175,13 +180,13 @@ with Mistral( ### Response -**[models.DeleteFileOut](../../models/deletefileout.md)** +**[models.DeleteFileResponse](../../models/deletefileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## download @@ -189,8 +194,9 @@ Download a file ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -220,7 +226,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_signed_url @@ -228,8 +234,9 @@ Get Signed Url ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -254,10 +261,10 @@ with Mistral( ### Response -**[models.FileSignedURL](../../models/filesignedurl.md)** +**[models.GetSignedURLResponse](../../models/getsignedurlresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/fim/README.md 
b/docs/sdks/fim/README.md index 10e6255d..49151bf5 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -1,5 +1,4 @@ # Fim -(*fim*) ## Overview @@ -16,8 +15,9 @@ FIM completion. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") + res = mistral.fim.complete(model="codestral-latest", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -36,7 +36,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -44,6 +44,7 @@ with Mistral( | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -56,8 +57,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -65,8 +66,9 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -74,7 +76,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") + res = mistral.fim.stream(model="codestral-latest", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: @@ -87,7 +89,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -95,6 +97,7 @@ with Mistral( | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -107,5 +110,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md deleted file mode 100644 index 3e0f12ce..00000000 --- a/docs/sdks/finetuning/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# FineTuning -(*fine_tuning*) - -## Overview - -### Available Operations diff --git a/docs/sdks/finetuningjobs/README.md b/docs/sdks/finetuningjobs/README.md new file mode 100644 index 00000000..4262b3a9 --- /dev/null +++ b/docs/sdks/finetuningjobs/README.md @@ -0,0 +1,232 @@ +# FineTuning.Jobs + +## Overview + +### Available Operations + +* [list](#list) - Get Fine Tuning Jobs +* [create](#create) - Create Fine Tuning Job +* [get](#get) - Get Fine Tuning Job +* [cancel](#cancel) - Cancel Fine Tuning Job +* [start](#start) - Start Fine Tuning Job + +## list + +Get a list of fine-tuning jobs for your organization and user. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. 
| +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListFineTuningJobsResponse](../../models/listfinetuningjobsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new fine-tuning job, it will be queued for processing. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ + "learning_rate": 0.0001, + }, invalid_sample_skip_percentage=0) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | 
+| `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a fine-tuned job details by its UUID. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## cancel + +Request the cancellation of a fine tuning job. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## start + +Request the start of a validated fine tuning job. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md deleted file mode 100644 index 1e240c33..00000000 --- a/docs/sdks/jobs/README.md +++ /dev/null @@ -1,228 +0,0 @@ -# Jobs -(*fine_tuning.jobs*) - -## Overview - -### Available Operations - -* [list](#list) - Get Fine Tuning Jobs -* [create](#create) - Create Fine Tuning Job -* [get](#get) - Get Fine Tuning Job -* [cancel](#cancel) - Cancel Fine Tuning Job -* [start](#start) - Start Fine Tuning Job - -## list - -Get a list of fine-tuning jobs for your organization and user. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. 
| -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.JobsOut](../../models/jobsout.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## create - -Create a new fine-tuning job, it will be queued for processing. - -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ - "learning_rate": 0.0001, - }, invalid_sample_skip_percentage=0) - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | -| `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | -| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. 
For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegrations](../../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | -| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | -| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## get - -Get a fine-tuned job details by its UUID. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## cancel - -Request the cancellation of a fine tuning job. - -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. 
| -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## start - -Request the start of a validated fine tuning job. - -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `job_id` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| - -### Response - -**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 9c709d0b..7df1ef4e 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -1,9 +1,8 @@ -# Libraries -(*beta.libraries*) +# Beta.Libraries ## Overview -(beta) Libraries API for indexing documents to enhance agent capabilities. +(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities. ### Available Operations @@ -19,8 +18,9 @@ List all libraries that you have created or have been shared with you. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -43,13 +43,13 @@ with Mistral( ### Response -**[models.ListLibraryOut](../../models/listlibraryout.md)** +**[models.ListLibrariesResponse](../../models/listlibrariesresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create @@ -57,8 +57,9 @@ Create a new Library, you will be marked as the owner and only you will have the ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -84,14 +85,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| 
errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -99,8 +100,9 @@ Given a library id, details information about that Library. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -124,14 +126,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -139,8 +141,9 @@ Given a library id, deletes it together with all documents that have been upload ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -164,14 +167,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -179,8 +182,9 @@ Given a library id, you can update the name and description. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -206,11 +210,11 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/mistral/README.md b/docs/sdks/mistral/README.md deleted file mode 100644 index 4b9573d0..00000000 --- a/docs/sdks/mistral/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Mistral SDK - -## Overview - -Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. - -### Available Operations diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md deleted file mode 100644 index 58082d21..00000000 --- a/docs/sdks/mistralagents/README.md +++ /dev/null @@ -1,229 +0,0 @@ -# MistralAgents -(*beta.agents*) - -## Overview - -(beta) Agents API - -### Available Operations - -* [create](#create) - Create a agent that can be used within a conversation. -* [list](#list) - List agent entities. -* [get](#get) - Retrieve an agent entity. -* [update](#update) - Update an agent entity. -* [update_version](#update_version) - Update an agent version. - -## create - -Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.beta.agents.create(model="LeBaron", name="") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTools](../../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.Agent](../../models/agent.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | - -## list - -Retrieve a list of agent entities sorted by creation time. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.beta.agents.list(page=0, page_size=20) - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[List[models.Agent]](../../models/.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | - -## get - -Given an agent retrieve an agent entity with its attributes. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.beta.agents.get(agent_id="") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.Agent](../../models/agent.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | - -## update - -Update an agent attributes and create a new version. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.beta.agents.update(agent_id="") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTools](../../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.Agent](../../models/agent.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | - -## update_version - -Switch the version of an agent. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.beta.agents.update_version(agent_id="", version=157995) - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `version` | *int* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.Agent](../../models/agent.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md deleted file mode 100644 index ef1e1549..00000000 --- a/docs/sdks/mistraljobs/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# MistralJobs -(*batch.jobs*) - -## Overview - -### Available Operations - -* [list](#list) - Get Batch Jobs -* [create](#create) - Create Batch Job -* [get](#get) - Get Batch Job -* [cancel](#cancel) - Cancel Batch Job - -## list - -Get a list of batch jobs for your organization and user. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.BatchJobsOut](../../models/batchjobsout.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## create - -Create a new batch job, it will be queued for processing. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.batch.jobs.create(input_files=[ - "fe3343a2-3b8d-404b-ba32-a78dede2614a", - ], endpoint="/v1/moderations", timeout_hours=24) - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | -| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.BatchJobOut](../../models/batchjobout.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## get - -Get a batch job details by its UUID. 
- -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `job_id` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - -### Response - -**[models.BatchJobOut](../../models/batchjobout.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | - -## cancel - -Request the cancellation of a batch job. - -### Example Usage - -```python -from mistralai import Mistral -import os - - -with Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), -) as mistral: - - res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") - - # Handle response - print(res) - -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `job_id` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| - -### Response - -**[models.BatchJobOut](../../models/batchjobout.md)** - -### Errors - -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 7dd5d1de..311a2db6 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -1,5 +1,4 @@ # Models -(*models*) ## Overview @@ -20,8 +19,9 @@ List all models available to the user. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -48,10 +48,9 @@ with Mistral( ### Errors -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -59,8 +58,9 @@ Retrieve information about a model. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -84,14 +84,14 @@ with Mistral( ### Response -**[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet](../../models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md)** +**[models.ResponseRetrieveModelV1ModelsModelIDGet](../../models/responseretrievemodelv1modelsmodelidget.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -99,8 +99,9 @@ Delete a fine-tuned model. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -130,8 +131,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -139,8 +140,9 @@ Update a model name or description. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -172,7 +174,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## archive @@ -180,8 +182,9 @@ Archive a fine-tuned model. ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -205,13 +208,13 @@ with Mistral( ### Response -**[models.ArchiveFTModelOut](../../models/archiveftmodelout.md)** +**[models.ArchiveModelResponse](../../models/archivemodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## unarchive @@ -219,8 +222,9 @@ Un-archive a fine-tuned model. 
### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -244,10 +248,10 @@ with Mistral( ### Response -**[models.UnarchiveFTModelOut](../../models/unarchiveftmodelout.md)** +**[models.UnarchiveModelResponse](../../models/unarchivemodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 2188f378..fde2a823 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -1,5 +1,4 @@ # Ocr -(*ocr*) ## Overview @@ -15,8 +14,9 @@ OCR ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -25,10 +25,12 @@ with Mistral( ) as mistral: res = mistral.ocr.process(model="CX-9", document={ - "image_url": { - "url": "https://measly-scrap.com", - }, - "type": "image_url", + "type": "document_url", + "document_url": "https://upset-labourer.net/", + }, bbox_annotation_format={ + "type": "text", + }, document_annotation_format={ + "type": "text", }) # Handle response @@ -38,18 +40,22 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `document` | [models.DocumentUnion](../../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -59,5 +65,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index fcac2467..97703c9b 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -1,5 +1,4 @@ -# Transcriptions -(*audio.transcriptions*) +# Audio.Transcriptions ## Overview @@ -8,7 +7,7 @@ API for audio transcription. 
### Available Operations * [complete](#complete) - Create Transcription -* [stream](#stream) - Create streaming transcription (SSE) +* [stream](#stream) - Create Streaming Transcription (SSE) ## complete @@ -16,8 +15,9 @@ Create Transcription ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.complete(model="Model X") + res = mistral.audio.transcriptions.complete(model="voxtral-mini-latest", diarize=False) # Handle response print(res) @@ -34,16 +34,18 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | -| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | -| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | -| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -53,16 +55,17 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream -Create streaming transcription (SSE) +Create Streaming Transcription (SSE) ### Example Usage + ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -70,7 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.stream(model="Camry") + res = mistral.audio.transcriptions.stream(model="Camry", diarize=False) with res as event_stream: for event in event_stream: @@ -89,6 +92,8 @@ with Mistral( | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
| | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -100,4 +105,4 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/azure/.env.example b/examples/azure/.env.example new file mode 100644 index 00000000..7467bf2e --- /dev/null +++ b/examples/azure/.env.example @@ -0,0 +1,4 @@ +AZURE_API_KEY=your-azure-api-key +AZURE_ENDPOINT=https://your-endpoint.services.ai.azure.com/models +AZURE_MODEL=your-deployment-name +AZURE_API_VERSION=2024-05-01-preview diff --git a/examples/azure/az_chat_no_streaming.py.py b/examples/azure/az_chat_no_streaming.py.py deleted file mode 100644 index 485b594e..00000000 --- a/examples/azure/az_chat_no_streaming.py.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai_azure import MistralAzure - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -res = client.chat.complete( - messages=[ - {"role": "user", "content": "What is the capital of France?"}, - ], - # you don't need model as it will always be "azureai" -) -print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py b/examples/azure/chat_no_streaming.py new file mode 100644 index 00000000..952b171d --- /dev/null +++ 
b/examples/azure/chat_no_streaming.py @@ -0,0 +1,22 @@ +import os + +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.models import ChatCompletionRequestMessage, UserMessage + +AZURE_API_KEY = os.environ.get("AZURE_API_KEY", "") +AZURE_ENDPOINT = os.environ.get("AZURE_ENDPOINT", "") +AZURE_MODEL = os.environ.get("AZURE_MODEL", "mistral-small-2503") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +client = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +messages: list[ChatCompletionRequestMessage] = [ + UserMessage(content="What is the capital of France?"), +] +res = client.chat.complete(model=AZURE_MODEL, messages=messages) +print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py.py b/examples/azure/chat_no_streaming.py.py deleted file mode 100644 index 485b594e..00000000 --- a/examples/azure/chat_no_streaming.py.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai_azure import MistralAzure - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -res = client.chat.complete( - messages=[ - {"role": "user", "content": "What is the capital of France?"}, - ], - # you don't need model as it will always be "azureai" -) -print(res.choices[0].message.content) diff --git a/examples/gcp/.env.example b/examples/gcp/.env.example new file mode 100644 index 00000000..6721bd37 --- /dev/null +++ b/examples/gcp/.env.example @@ -0,0 +1,3 @@ +GCP_PROJECT_ID=your-gcp-project-id +GCP_REGION=us-central1 +GCP_MODEL=mistral-small-2503 diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py index 178f151c..61a2d076 100755 --- a/examples/gcp/async_chat_no_streaming.py +++ b/examples/gcp/async_chat_no_streaming.py @@ -1,19 +1,43 @@ #!/usr/bin/env python +""" 
+Example: Async chat completion with GCP Vertex AI. + +The SDK automatically: +- Detects credentials via google.auth.default() +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from project_id and region + +Prerequisites: + gcloud auth application-default login + +Usage: + GCP_PROJECT_ID=your-project GCP_REGION=us-central1 GCP_MODEL=mistral-small-2503 python async_chat_no_streaming.py +""" import asyncio import os -from mistralai_gcp import MistralGoogleCloud -from mistralai_gcp.models.usermessage import UserMessage +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.models import UserMessage +# Configuration from environment variables +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") # Optional: auto-detected from credentials +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") -async def main(): - model = "mistral-large-2407" - client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) +async def main(): + # The SDK automatically handles: + # - Credential detection via google.auth.default() + # - Token refresh when expired + # - Vertex AI URL construction + client = MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) chat_response = await client.chat.complete_async( - model=model, + model=GCP_MODEL, messages=[UserMessage(content="What is the best French cheese?")], ) diff --git a/examples/gcp/gcp_async_chat_no_streaming.py b/examples/gcp/gcp_async_chat_no_streaming.py deleted file mode 100755 index 178f151c..00000000 --- a/examples/gcp/gcp_async_chat_no_streaming.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai_gcp import MistralGoogleCloud -from mistralai_gcp.models.usermessage import UserMessage - - -async def main(): - model = "mistral-large-2407" - - client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) - - chat_response = await client.chat.complete_async( - 
model=model, - messages=[UserMessage(content="What is the best French cheese?")], - ) - - print(chat_response.choices[0].message.content) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/mistral/agents/async_agents_no_streaming.py b/examples/mistral/agents/async_agents_no_streaming.py index 45f300ac..6041cad3 100755 --- a/examples/mistral/agents/async_agents_no_streaming.py +++ b/examples/mistral/agents/async_agents_no_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/agents/async_conversation_agent.py b/examples/mistral/agents/async_conversation_agent.py index 54f002ac..981f13c7 100644 --- a/examples/mistral/agents/async_conversation_agent.py +++ b/examples/mistral/agents/async_conversation_agent.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py index 9e118037..bb96ed78 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -2,55 +2,26 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel -MODEL = "mistral-medium-latest" - - -def math_question_generator(question_num: int): - """Random generator of mathematical question - - Args: - question_num (int): the number of the question that will be returned, should be between 1-100 - """ - return ( - "solve the following differential equation: `y'' + 3y' + 2y = 0`" - if question_num % 2 == 0 - else "solve the following differential equation: 
`y'' - 4y' + 4y = e^x`" - ) +MODEL = "mistral-medium-2505" async def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) - class Explanation(BaseModel): - explanation: str - output: str - - class MathDemonstration(BaseModel): - steps: list[Explanation] - final_answer: str + class MathResult(BaseModel): + answer: int - async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: - # register a new function that can be executed on the client side - run_ctx.register_func(math_question_generator) + async with RunContext(model=MODEL, output_format=MathResult) as run_ctx: run_result = await client.beta.conversations.run_async( run_ctx=run_ctx, - instructions="Use the code interpreter to help you when asked mathematical questions.", - inputs=[ - {"role": "user", "content": "hey"}, - {"role": "assistant", "content": "hello"}, - {"role": "user", "content": "Request a math question and answer it."}, - ], - tools=[{"type": "code_interpreter"}], + inputs=[{"role": "user", "content": "What is 2 + 2?"}], ) - print("All run entries:") - for entry in run_result.output_entries: - print(f"{entry}") - print(f"Final model: {run_result.output_as_model}") + print(f"Result: {run_result.output_as_model}") if __name__ == "__main__": diff --git a/examples/mistral/agents/async_conversation_run_code_interpreter.py b/examples/mistral/agents/async_conversation_run_code_interpreter.py new file mode 100644 index 00000000..10c81d77 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_code_interpreter.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.client.types import BaseModel + +MODEL = "mistral-medium-2505" + + +def math_question_generator(question_num: int): + """Random generator of mathematical question + + Args: + question_num (int): the number of the question that will be returned, should be between 
1-100 + """ + return ( + "solve the following differential equation: `y'' + 3y' + 2y = 0`" + if question_num % 2 == 0 + else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" + ) + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: + # register a new function that can be executed on the client side + run_ctx.register_func(math_question_generator) + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + instructions="Use the code interpreter to help you when asked mathematical questions.", + inputs=[ + {"role": "user", "content": "hey"}, + {"role": "assistant", "content": "hello"}, + {"role": "user", "content": "Request a math question and answer it."}, + ], + tools=[{"type": "code_interpreter"}], + ) + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/agents/async_conversation_run_mcp.py b/examples/mistral/agents/async_conversation_run_mcp.py index 0e373715..52550004 100644 --- a/examples/mistral/agents/async_conversation_run_mcp.py +++ b/examples/mistral/agents/async_conversation_run_mcp.py @@ -3,7 +3,7 @@ import os import random -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mcp import StdioServerParameters from mistralai.extra.mcp.stdio import ( @@ -11,7 +11,7 @@ ) from pathlib import Path -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel cwd = Path(__file__).parent MODEL = "mistral-medium-latest" diff --git 
a/examples/mistral/agents/async_conversation_run_mcp_remote.py b/examples/mistral/agents/async_conversation_run_mcp_remote.py index 7b2f46a6..d6fac492 100644 --- a/examples/mistral/agents/async_conversation_run_mcp_remote.py +++ b/examples/mistral/agents/async_conversation_run_mcp_remote.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mistralai.extra.mcp.sse import ( diff --git a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py index f69d8096..c255895e 100644 --- a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py +++ b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py @@ -5,7 +5,7 @@ import threading import webbrowser -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mistralai.extra.mcp.sse import ( diff --git a/examples/mistral/agents/async_conversation_run_stream.py b/examples/mistral/agents/async_conversation_run_stream.py index 1e6ad87b..431b9cc9 100644 --- a/examples/mistral/agents/async_conversation_run_stream.py +++ b/examples/mistral/agents/async_conversation_run_stream.py @@ -3,14 +3,14 @@ import os import random -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mcp import StdioServerParameters from mistralai.extra.mcp.stdio import MCPClientSTDIO from pathlib import Path from mistralai.extra.run.result import RunResult -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel cwd = Path(__file__).parent MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_multi_turn_conversation.py b/examples/mistral/agents/async_multi_turn_conversation.py new file mode 100644 index 00000000..26c2378f --- /dev/null +++ 
b/examples/mistral/agents/async_multi_turn_conversation.py @@ -0,0 +1,69 @@ +import os +from mistralai.client import Mistral + +from mistralai.extra.run.context import RunContext +import logging +import time +import asyncio + + +MODEL = "mistral-medium-latest" + +USER_MESSAGE = """ +Please make the Secret Santa for me +To properly do it you need to: +- Get the friend you were assigned to (using the get_secret_santa_assignment function) +- Read into his gift wishlist what they would like to receive (using the get_gift_wishlist function) +- Buy the gift (using the buy_gift function) +- Find the best website to buy the gift using a web search +- Send it to them (using the send_gift function) +""" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + mistral_agent_id = os.environ["MISTRAL_AGENT_ID"] + client = Mistral( + api_key=api_key, debug_logger=logging.getLogger("mistralai") + ) + + async with RunContext( + agent_id=mistral_agent_id + ) as run_context: + run_context.register_func(get_secret_santa_assignment) + run_context.register_func(get_gift_wishlist) + run_context.register_func(buy_gift) + run_context.register_func(send_gift) + + await client.beta.conversations.run_async( + run_ctx=run_context, + inputs=USER_MESSAGE, + ) + + +def get_secret_santa_assignment(): + """Get the friend you were assigned to""" + time.sleep(2) + return "John Doe" + + +def get_gift_wishlist(friend_name: str): + """Get the gift wishlist of the friend you were assigned to""" + time.sleep(1.5) + return ["Book", "Chocolate", "T-Shirt"] + + +def buy_gift(gift_name: str): + """Buy the gift you want to send to your friend""" + time.sleep(1.1) + return f"Bought {gift_name}" + + +def send_gift(friend_name: str, gift_name: str, website: str): + """Send the gift to your friend""" + time.sleep(2.2) + return f"Sent {gift_name} to {friend_name} bought on {website}" + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git 
a/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py new file mode 100644 index 00000000..7653b0ed --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py @@ -0,0 +1,473 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import difflib +import os +import sys +from dataclasses import dataclass +from typing import AsyncIterator, Sequence + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai.client import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.client.models import ( +    AudioFormat, +    RealtimeTranscriptionError, +    RealtimeTranscriptionSessionCreated, +    TranscriptionStreamDone, +    TranscriptionStreamTextDelta, +) + +from pyaudio_utils import load_pyaudio + +console = Console() + + +@dataclass +class DualTranscriptState: +    """Tracks transcript state for dual-delay transcription.""" + +    fast_full_text: str = "" +    slow_full_text: str = "" +    fast_status: str = "🔌 Connecting..." +    slow_status: str = "🔌 Connecting..."
+ error: str | None = None + fast_done: bool = False + slow_done: bool = False + + def set_error(self, message: str) -> None: + self.error = message + self.fast_status = "❌ Error" + self.slow_status = "❌ Error" + + +class DualTranscriptDisplay: + """Renders a live dual-delay transcription UI.""" + + def __init__( + self, + *, + model: str, + fast_delay_ms: int, + slow_delay_ms: int, + state: DualTranscriptState, + ) -> None: + self.model = model + self.fast_delay_ms = fast_delay_ms + self.slow_delay_ms = slow_delay_ms + self.state = state + + @staticmethod + def _normalize_word(word: str) -> str: + return word.strip(".,!?;:\"'()[]{}").lower() + + def _compute_display_texts(self) -> tuple[str, str]: + slow_words = self.state.slow_full_text.split() + fast_words = self.state.fast_full_text.split() + + if not slow_words: + partial_text = f" {self.state.fast_full_text}".rstrip() + return "", partial_text + + slow_norm = [self._normalize_word(word) for word in slow_words] + fast_norm = [self._normalize_word(word) for word in fast_words] + + matcher = difflib.SequenceMatcher(None, slow_norm, fast_norm) + last_fast_index = 0 + slow_progress = 0 + for block in matcher.get_matching_blocks(): + if block.size == 0: + continue + slow_end = block.a + block.size + if slow_end > slow_progress: + slow_progress = slow_end + last_fast_index = block.b + block.size + + if last_fast_index < len(fast_words): + ahead_words = fast_words[last_fast_index:] + partial_text = " " + " ".join(ahead_words) if ahead_words else "" + else: + partial_text = "" + + return self.state.slow_full_text, partial_text + + @staticmethod + def _status_style(status: str) -> str: + if "Listening" in status: + return "green" + if "Connecting" in status: + return "yellow dim" + if "Done" in status or "Stopped" in status: + return "dim" + return "red" + + def render(self) -> Layout: + layout = Layout() + + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + 
header_text.append(" │ ", style="dim") + header_text.append( + f"fast {self.fast_delay_ms}ms", style="bright_yellow" + ) + header_text.append( + f" {self.state.fast_status}", + style=self._status_style(self.state.fast_status), + ) + header_text.append(" │ ", style="dim") + header_text.append(f"slow {self.slow_delay_ms}ms", style="white") + header_text.append( + f" {self.state.slow_status}", + style=self._status_style(self.state.slow_status), + ) + + header = Align.left(header_text, vertical="middle", pad=False) + + final_text, partial_text = self._compute_display_texts() + transcript_text = Text() + if final_text or partial_text: + transcript_text.append(final_text, style="white") + transcript_text.append(partial_text, style="bright_yellow") + else: + transcript_text.append("...", style="dim") + + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = Align.left(footer_text, vertical="middle", pad=False) + + if self.state.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.state.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + +async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. 
+ """ + pyaudio = load_pyaudio() + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +async def queue_audio_iter( + queue: asyncio.Queue[bytes | None], +) -> AsyncIterator[bytes]: + """Yield audio chunks from a queue until a None sentinel is received.""" + while True: + chunk = await queue.get() + if chunk is None: + break + yield chunk + + +async def broadcast_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, + queues: Sequence[asyncio.Queue[bytes | None]], +) -> None: + """Read from the microphone once and broadcast to multiple queues.""" + try: + async for chunk in iter_microphone( + sample_rate=sample_rate, chunk_duration_ms=chunk_duration_ms + ): + for queue in queues: + await queue.put(chunk) + finally: + for queue in queues: + while True: + try: + queue.put_nowait(None) + break + except asyncio.QueueFull: + try: + queue.get_nowait() + except asyncio.QueueEmpty: + break + + +def _status_for_event(event: object) -> str: + if isinstance(event, RealtimeTranscriptionSessionCreated): + return "🎤 Listening..." + return "✅ Done" + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Dual-delay real-time microphone transcription." 
+ ) + parser.add_argument( + "--model", + default="voxtral-mini-transcribe-realtime-2602", + help="Model ID", + ) + parser.add_argument( + "--fast-delay-ms", + type=int, + default=240, + help="Fast target streaming delay in ms", + ) + parser.add_argument( + "--slow-delay-ms", + type=int, + default=2400, + help="Slow target streaming delay in ms", + ) + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", + type=int, + default=10, + help="Chunk duration in ms", + ) + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def run_stream( + *, + client: Mistral, + model: str, + delay_ms: int, + audio_stream: AsyncIterator[bytes], + audio_format: AudioFormat, + state: DualTranscriptState, + update_queue: asyncio.Queue[None], + is_fast: bool, +) -> None: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=audio_stream, + model=model, + audio_format=audio_format, + target_streaming_delay_ms=delay_ms, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + if is_fast: + state.fast_status = _status_for_event(event) + else: + state.slow_status = _status_for_event(event) + elif isinstance(event, TranscriptionStreamTextDelta): + if is_fast: + state.fast_full_text += event.text + else: + state.slow_full_text += event.text + elif isinstance(event, TranscriptionStreamDone): + if is_fast: + state.fast_status = _status_for_event(event) + state.fast_done = True + else: + state.slow_status = _status_for_event(event) + state.slow_done = True + break + elif isinstance(event, RealtimeTranscriptionError): + state.set_error(str(event.error)) + break + elif isinstance(event, UnknownRealtimeEvent): + 
continue + + if update_queue.empty(): + update_queue.put_nowait(None) + except Exception as exc: # pragma: no cover - safety net for UI demo + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + + +async def ui_loop( + display: DualTranscriptDisplay, + update_queue: asyncio.Queue[None], + stop_event: asyncio.Event, + *, + refresh_hz: float = 12.0, +) -> None: + with Live( + display.render(), console=console, refresh_per_second=refresh_hz, screen=True + ) as live: + while not stop_event.is_set(): + try: + await asyncio.wait_for(update_queue.get(), timeout=0.25) + except asyncio.TimeoutError: + pass + live.update(display.render()) + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + + state = DualTranscriptState() + display = DualTranscriptDisplay( + model=args.model, + fast_delay_ms=args.fast_delay_ms, + slow_delay_ms=args.slow_delay_ms, + state=state, + ) + + client = Mistral(api_key=api_key, server_url=args.base_url) + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + fast_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + slow_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + + stop_event = asyncio.Event() + update_queue: asyncio.Queue[None] = asyncio.Queue(maxsize=1) + + broadcaster = asyncio.create_task( + broadcast_microphone( + sample_rate=args.sample_rate, + chunk_duration_ms=args.chunk_duration, + queues=(fast_queue, slow_queue), + ) + ) + + fast_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + delay_ms=args.fast_delay_ms, + audio_stream=queue_audio_iter(fast_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=True, + ) + ) + + slow_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + 
delay_ms=args.slow_delay_ms, +            audio_stream=queue_audio_iter(slow_queue), +            audio_format=audio_format, +            state=state, +            update_queue=update_queue, +            is_fast=False, +        ) +    ) + +    ui_task = asyncio.create_task( +        ui_loop(display, update_queue, stop_event, refresh_hz=12.0) +    ) + +    try: +        while True: +            await asyncio.sleep(0.1) +            for task in (broadcaster, fast_task, slow_task): +                if not task.done(): +                    continue +                exc = task.exception() +                if exc: +                    state.set_error(str(exc)) +                    if update_queue.empty(): +                        update_queue.put_nowait(None) +                    stop_event.set() +                    break +            if state.error: +                stop_event.set() +                break +            if state.fast_done and state.slow_done: +                stop_event.set() +                break +    except KeyboardInterrupt: +        state.fast_status = "⏹️ Stopped" +        state.slow_status = "⏹️ Stopped" +        stop_event.set() +    finally: +        broadcaster.cancel() +        fast_task.cancel() +        slow_task.cancel() +        await asyncio.gather(broadcaster, fast_task, slow_task, return_exceptions=True) +        await ui_task + +    return 0 if not state.error else 1 + + +if __name__ == "__main__": +    sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py new file mode 100644 index 00000000..49568aea --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import os +import sys +from typing import AsyncIterator + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai.client import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.client.models import (
+ AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSessionCreated, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + +from pyaudio_utils import load_pyaudio + +console = Console() + + +class TranscriptDisplay: + """Manages the live transcript display.""" + + def __init__(self, model: str, target_streaming_delay_ms: int | None) -> None: + self.model = model + self.target_streaming_delay_ms = target_streaming_delay_ms + self.transcript = "" + self.status = "🔌 Connecting..." + self.error: str | None = None + + def set_listening(self) -> None: + self.status = "🎤 Listening..." + + def add_text(self, text: str) -> None: + self.transcript += text + + def set_done(self) -> None: + self.status = "✅ Done" + + def set_error(self, error: str) -> None: + self.status = "❌ Error" + self.error = error + + def render(self) -> Layout: + layout = Layout() + + # Create minimal header + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + if self.target_streaming_delay_ms is not None: + header_text.append( + f" · delay {self.target_streaming_delay_ms}ms", style="dim" + ) + header_text.append(" │ ", style="dim") + + if "Listening" in self.status: + status_style = "green" + elif "Connecting" in self.status: + status_style = "yellow dim" + elif "Done" in self.status or "Stopped" in self.status: + status_style = "dim" + else: + status_style = "red" + header_text.append(self.status, style=status_style) + + header = Align.left(header_text, vertical="middle", pad=False) + + # Create main transcript area - no title, minimal border + transcript_text = Text( + self.transcript or "...", style="white" if self.transcript else "dim" + ) + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + # Minimal footer + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = 
Align.left(footer_text, vertical="middle", pad=False) + + # Handle error display + if self.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + +async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. + """ + pyaudio = load_pyaudio() + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + # stream.read is blocking; run it off-thread + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Real-time microphone transcription.") + parser.add_argument("--model", default="voxtral-mini-transcribe-realtime-2602", help="Model ID") + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", type=int, default=10, help="Chunk duration in ms" + ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) + parser.add_argument( + "--api-key", default=os.environ.get("MISTRAL_API_KEY"), help="Mistral API key" + ) + parser.add_argument( + 
"--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + + client = Mistral(api_key=api_key, server_url=args.base_url) + + # microphone is always pcm_s16le here + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + mic_stream = iter_microphone( + sample_rate=args.sample_rate, chunk_duration_ms=args.chunk_duration + ) + + display = TranscriptDisplay( + model=args.model, target_streaming_delay_ms=args.target_streaming_delay_ms + ) + + with Live( + display.render(), console=console, refresh_per_second=10, screen=True + ) as live: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=mic_stream, + model=args.model, + audio_format=audio_format, + target_streaming_delay_ms=args.target_streaming_delay_ms, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + display.set_listening() + live.update(display.render()) + elif isinstance(event, TranscriptionStreamTextDelta): + display.add_text(event.text) + live.update(display.render()) + elif isinstance(event, TranscriptionStreamDone): + display.set_done() + live.update(display.render()) + break + elif isinstance(event, RealtimeTranscriptionError): + display.set_error(str(event.error)) + live.update(display.render()) + return 1 + elif isinstance(event, UnknownRealtimeEvent): + continue + except KeyboardInterrupt: + display.status = "⏹️ Stopped" + live.update(display.render()) + except Exception as exc: + display.set_error(str(exc)) + live.update(display.render()) + return 1 + + return 0 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py 
b/examples/mistral/audio/async_realtime_transcription_stream.py new file mode 100644 index 00000000..c005cf3f --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python + +import argparse +import asyncio +import os +import subprocess +import sys +import tempfile +from pathlib import Path +from typing import AsyncIterator + +from mistralai.client import Mistral +from mistralai.extra.realtime.connection import UnknownRealtimeEvent +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + + +def convert_audio_to_pcm( + input_path: Path, +) -> Path: + temp_file = tempfile.NamedTemporaryFile(suffix=".pcm", delete=False) + temp_path = Path(temp_file.name) + temp_file.close() + + cmd = [ + "ffmpeg", + "-y", + "-i", + str(input_path), + "-f", + "s16le", + "-ar", + str(16000), + "-ac", + "1", + str(temp_path), + ] + + try: + subprocess.run(cmd, check=True, capture_output=True, text=True) + except subprocess.CalledProcessError as exc: + temp_path.unlink(missing_ok=True) + raise RuntimeError(f"ffmpeg conversion failed: {exc.stderr}") from exc + + return temp_path + + +async def aiter_audio_file( + path: Path, + *, + chunk_size: int = 4096, + chunk_delay: float = 0.0, +) -> AsyncIterator[bytes]: + with open(path, "rb") as f: + while True: + chunk = f.read(chunk_size) + if not chunk: + break + yield chunk + if chunk_delay > 0: + await asyncio.sleep(chunk_delay) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Real-time audio transcription via WebSocket (iterator-based)." 
+ ) + parser.add_argument("file", type=Path, help="Path to the audio file") + parser.add_argument("--model", default="voxtral-mini-2601", help="Model ID") + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "https://api.mistral.ai"), + help="API base URL (http/https/ws/wss)", + ) + parser.add_argument( + "--chunk-size", type=int, default=4096, help="Audio chunk size in bytes" + ) + parser.add_argument( + "--chunk-delay", + type=float, + default=0.01, + help="Delay between chunks in seconds", + ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) + parser.add_argument( + "--no-convert", + action="store_true", + help="Skip ffmpeg conversion (input must be raw PCM)", + ) + return parser.parse_args() + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + pcm_path = args.file + temp_path = None + + if not args.no_convert and args.file.suffix.lower() not in (".pcm", ".raw"): + pcm_path = convert_audio_to_pcm(args.file) + temp_path = pcm_path + + client = Mistral(api_key=api_key, server_url=args.base_url) + + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=aiter_audio_file( + pcm_path, + chunk_size=args.chunk_size, + chunk_delay=args.chunk_delay, + ), + model=args.model, + audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000), + target_streaming_delay_ms=args.target_streaming_delay_ms, + ): + if isinstance(event, TranscriptionStreamTextDelta): + print(event.text, end="", flush=True) + elif isinstance(event, TranscriptionStreamDone): + print() + break + elif isinstance(event, RealtimeTranscriptionError): + print(f"\nError: {event.error}", file=sys.stderr) + break + elif isinstance(event, 
UnknownRealtimeEvent): + # ignore future / unknown events; keep going + continue + + finally: + if temp_path is not None: + temp_path.unlink(missing_ok=True) + + return 0 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/chat_base64.py b/examples/mistral/audio/chat_base64.py index ea5ea79a..d6afb2ab 100755 --- a/examples/mistral/audio/chat_base64.py +++ b/examples/mistral/audio/chat_base64.py @@ -2,9 +2,8 @@ import base64 import os -from mistralai import Mistral -from mistralai.models import UserMessage - +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): @@ -16,13 +15,17 @@ def main(): content = f.read() chat_response = client.chat.complete( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What's in this audio file?"}, - { - "type": "input_audio", - "input_audio": base64.b64encode(content).decode('utf-8'), - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What's in this audio file?"}, + { + "type": "input_audio", + "input_audio": base64.b64encode(content).decode("utf-8"), + }, + ] + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/mistral/audio/chat_no_streaming.py b/examples/mistral/audio/chat_no_streaming.py index 2caebb25..87237ec0 100755 --- a/examples/mistral/audio/chat_no_streaming.py +++ b/examples/mistral/audio/chat_no_streaming.py @@ -2,9 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage - +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): @@ -15,13 +14,17 @@ def main(): chat_response = client.chat.complete( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What is this audio about?"}, - { - "type": "input_audio", - "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", - }, - ])], + messages=[ + UserMessage( + 
content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", + }, + ] + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/mistral/audio/chat_streaming.py b/examples/mistral/audio/chat_streaming.py index 060bfdd9..b418ef57 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -2,9 +2,9 @@ import os -from mistralai import Mistral, File -from mistralai.models import UserMessage - +from mistralai.client import Mistral +from mistralai.client.models import File +from mistralai.client.models import UserMessage def main(): @@ -13,19 +13,25 @@ def main(): client = Mistral(api_key=api_key) with open("examples/fixtures/bcn_weather.mp3", "rb") as f: - file = client.files.upload(file=File(content=f, file_name=f.name), purpose="audio") + file = client.files.upload( + file=File(content=f, file_name=f.name), purpose="audio" + ) print(f"Uploaded audio file, id={file.id}") signed_url = client.files.get_signed_url(file_id=file.id) try: chat_response = client.chat.stream( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What is this audio about?"}, - { - "type": "input_audio", - "input_audio": signed_url.url, - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": signed_url.url, + }, + ] + ) + ], ) for chunk in chat_response: print(chunk.data.choices[0].delta.content) @@ -33,5 +39,6 @@ def main(): client.files.delete(file_id=file.id) print(f"Deleted audio file, id={file.id}") + if __name__ == "__main__": main() diff --git a/examples/mistral/audio/pyaudio_utils.py b/examples/mistral/audio/pyaudio_utils.py new file mode 100644 index 00000000..af72a885 --- /dev/null +++ b/examples/mistral/audio/pyaudio_utils.py @@ -0,0 +1,38 @@ +from __future__ 
import annotations + +from types import ModuleType + + +def load_pyaudio() -> ModuleType: + """ + Import PyAudio with a friendly error when PortAudio is missing. + + Raises: + RuntimeError: If PyAudio/PortAudio cannot be imported. + """ + try: + import pyaudio + except Exception as exc: + details = str(exc).lower() + if isinstance(exc, ModuleNotFoundError) and exc.name == "pyaudio": + message = ( + "PyAudio is required to use the microphone.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + elif "pyaudio._portaudio" in details or "portaudio" in details: + message = ( + "PyAudio is installed, but the PortAudio native library is missing or " + "failed to load.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + else: + message = ( + "PyAudio is required to use the microphone, but it could not be " + "imported.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." 
+ ) + raise RuntimeError(message) from exc + return pyaudio diff --git a/examples/mistral/audio/transcription_async.py b/examples/mistral/audio/transcription_async.py index 9092fc03..f04f397e 100644 --- a/examples/mistral/audio/transcription_async.py +++ b/examples/mistral/audio/transcription_async.py @@ -2,7 +2,8 @@ import os import asyncio -from mistralai import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py new file mode 100644 index 00000000..4b511c87 --- /dev/null +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import os +import asyncio +import pathlib +from mistralai.client import Mistral +from mistralai.client.models import File + +fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-2602" + + client = Mistral(api_key=api_key) + with open(fixture_dir / "bcn_weather.mp3", "rb") as f: + response = await client.audio.transcriptions.complete_async( + model=model, + file=File(content=f, file_name=f.name), + diarize=True, + timestamp_granularities=["segment"], + ) + for segment in response.segments: + speaker = segment.speaker_id or "unknown" + print( + f"[{segment.start:.1f}s → {segment.end:.1f}s] {speaker}: {segment.text.strip()}" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/audio/transcription_segments.py b/examples/mistral/audio/transcription_segments.py index 626b83e2..3d691711 100644 --- a/examples/mistral/audio/transcription_segments.py +++ b/examples/mistral/audio/transcription_segments.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/audio/transcription_segments_stream.py 
b/examples/mistral/audio/transcription_segments_stream.py index bedfbd40..32edf951 100644 --- a/examples/mistral/audio/transcription_segments_stream.py +++ b/examples/mistral/audio/transcription_segments_stream.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/audio/transcription_stream_async.py b/examples/mistral/audio/transcription_stream_async.py index b7f553b3..3055f3de 100644 --- a/examples/mistral/audio/transcription_stream_async.py +++ b/examples/mistral/audio/transcription_stream_async.py @@ -2,7 +2,8 @@ import asyncio import os -from mistralai import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/audio/transcription_url.py b/examples/mistral/audio/transcription_url.py index b194b50c..907f830d 100644 --- a/examples/mistral/audio/transcription_url.py +++ b/examples/mistral/audio/transcription_url.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/async_chat_no_streaming.py b/examples/mistral/chat/async_chat_no_streaming.py index 9448f09d..ad45d0fd 100755 --- a/examples/mistral/chat/async_chat_no_streaming.py +++ b/examples/mistral/chat/async_chat_no_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_chat_with_image_no_streaming.py b/examples/mistral/chat/async_chat_with_image_no_streaming.py index efadff89..5d2cbdaa 100755 --- a/examples/mistral/chat/async_chat_with_image_no_streaming.py +++ b/examples/mistral/chat/async_chat_with_image_no_streaming.py @@ -4,8 +4,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from 
mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_chat_with_streaming.py b/examples/mistral/chat/async_chat_with_streaming.py index 1ef500ae..1642ea41 100755 --- a/examples/mistral/chat/async_chat_with_streaming.py +++ b/examples/mistral/chat/async_chat_with_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_structured_outputs.py b/examples/mistral/chat/async_structured_outputs.py index a512d38f..09ed5737 100644 --- a/examples/mistral/chat/async_structured_outputs.py +++ b/examples/mistral/chat/async_structured_outputs.py @@ -4,7 +4,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/chat/chat_no_streaming.py b/examples/mistral/chat/chat_no_streaming.py index 72506dd9..5f6968ca 100755 --- a/examples/mistral/chat/chat_no_streaming.py +++ b/examples/mistral/chat/chat_no_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chat_prediction.py b/examples/mistral/chat/chat_prediction.py index 1ff87e3f..88c57e77 100644 --- a/examples/mistral/chat/chat_prediction.py +++ b/examples/mistral/chat/chat_prediction.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chat_with_streaming.py b/examples/mistral/chat/chat_with_streaming.py index 66b167f1..94a3e29c 100755 --- 
a/examples/mistral/chat/chat_with_streaming.py +++ b/examples/mistral/chat/chat_with_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chatbot_with_streaming.py b/examples/mistral/chat/chatbot_with_streaming.py index 8d47deb5..eae79dcf 100755 --- a/examples/mistral/chat/chatbot_with_streaming.py +++ b/examples/mistral/chat/chatbot_with_streaming.py @@ -7,9 +7,10 @@ import os import readline import sys +from typing import Any -from mistralai import Mistral -from mistralai.models import AssistantMessage, SystemMessage, UserMessage +from mistralai.client import Mistral +from mistralai.client.models import AssistantMessage, SystemMessage, UserMessage MODEL_LIST = [ "mistral-small-latest", @@ -21,7 +22,7 @@ DEFAULT_TEMPERATURE = 0.7 LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" # A dictionary of all commands and their arguments, used for tab completion. 
-COMMAND_LIST = { +COMMAND_LIST: dict[str, Any] = { "/new": {}, "/help": {}, "/model": {model: {} for model in MODEL_LIST}, # Nested completions for models diff --git a/examples/mistral/chat/completion_with_streaming.py b/examples/mistral/chat/completion_with_streaming.py index 5bee2033..399e8638 100644 --- a/examples/mistral/chat/completion_with_streaming.py +++ b/examples/mistral/chat/completion_with_streaming.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/chat/function_calling.py b/examples/mistral/chat/function_calling.py index aba7d671..68e9d91c 100644 --- a/examples/mistral/chat/function_calling.py +++ b/examples/mistral/chat/function_calling.py @@ -1,16 +1,20 @@ import functools import json import os -from typing import Dict, List - -from mistralai import Mistral -from mistralai.models.assistantmessage import AssistantMessage -from mistralai.models.function import Function -from mistralai.models.toolmessage import ToolMessage -from mistralai.models.usermessage import UserMessage +from typing import Any + +from mistralai.client import Mistral +from mistralai.client.models import ( + AssistantMessage, + ChatCompletionRequestMessage, + Function, + Tool, + ToolMessage, + UserMessage, +) # Assuming we have the following data -data = { +data: dict[str, list[Any]] = { "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], "customer_id": ["C001", "C002", "C003", "C002", "C001"], "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], @@ -25,20 +29,18 @@ } -def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: +def retrieve_payment_status(data: dict[str, list[Any]], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return 
json.dumps({"status": "Error - transaction id not found"}) -def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: +def retrieve_payment_date(data: dict[str, list[Any]], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"date": data["payment_date"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { @@ -46,10 +48,9 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data), } -tools = [ - { - "type": "function", - "function": Function( +tools: list[Tool] = [ + Tool( + function=Function( name="retrieve_payment_status", description="Get payment status of a transaction id", parameters={ @@ -63,10 +64,9 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: }, }, ), - }, - { - "type": "function", - "function": Function( + ), + Tool( + function=Function( name="retrieve_payment_date", description="Get payment date of a transaction id", parameters={ @@ -80,7 +80,7 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: }, }, ), - }, + ), ] api_key = os.environ["MISTRAL_API_KEY"] @@ -88,28 +88,27 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: client = Mistral(api_key=api_key) -messages = [UserMessage(content="What's the status of my transaction?")] +messages: list[ChatCompletionRequestMessage] = [ + UserMessage(content="What's the status of my transaction?") +] -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) print(response.choices[0].message.content) messages.append(AssistantMessage(content=response.choices[0].message.content)) 
messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) -tool_call = response.choices[0].message.tool_calls[0] +tool_calls = response.choices[0].message.tool_calls +if not tool_calls: + raise RuntimeError("Expected tool calls") +tool_call = tool_calls[0] function_name = tool_call.function.name -function_params = json.loads(tool_call.function.arguments) +function_params = json.loads(str(tool_call.function.arguments)) -print( - f"calling function_name: {function_name}, with function_params: {function_params}" -) +print(f"calling function_name: {function_name}, with function_params: {function_params}") function_result = names_to_functions[function_name](**function_params) @@ -128,8 +127,6 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: ) print(messages) -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) print(f"{response.choices[0].message.content}") diff --git a/examples/mistral/chat/json_format.py b/examples/mistral/chat/json_format.py index 23c38680..8fa1416a 100755 --- a/examples/mistral/chat/json_format.py +++ b/examples/mistral/chat/json_format.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/structured_outputs.py b/examples/mistral/chat/structured_outputs.py index bc4a5e18..64521f46 100644 --- a/examples/mistral/chat/structured_outputs.py +++ b/examples/mistral/chat/structured_outputs.py @@ -3,7 +3,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import 
Mistral def main(): diff --git a/examples/mistral/chat/structured_outputs_with_json_schema.py b/examples/mistral/chat/structured_outputs_with_json_schema.py index 69ac9690..2f99f747 100644 --- a/examples/mistral/chat/structured_outputs_with_json_schema.py +++ b/examples/mistral/chat/structured_outputs_with_json_schema.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/structured_outputs_with_pydantic.py b/examples/mistral/chat/structured_outputs_with_pydantic.py index 299f7509..ded9d52d 100644 --- a/examples/mistral/chat/structured_outputs_with_pydantic.py +++ b/examples/mistral/chat/structured_outputs_with_pydantic.py @@ -3,7 +3,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral from typing import List diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index 10c8bb76..881f6a69 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -2,7 +2,8 @@ from pprint import pprint import asyncio -from mistralai import Mistral, TrainingFile, ClassifierTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import ClassifierFineTuningJob, ClassifierFineTuningJobDetails, ClassifierTrainingParameters, TrainingFile import os @@ -26,7 +27,7 @@ async def upload_files(client: Mistral, file_names: list[str]) -> list[str]: return file_ids -async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: +async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str | None: print("Creating job...") job = await client.fine_tuning.jobs.create_async( model="ministral-3b-latest", @@ -35,11 +36,14 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: TrainingFile(file_id=training_file_id) for training_file_id in 
training_file_ids ], - hyperparameters=ClassifierTrainingParametersIn( + hyperparameters=ClassifierTrainingParameters( learning_rate=0.0001, ), auto_start=True, ) + if not isinstance(job, ClassifierFineTuningJob): + print("Unexpected job type returned") + return None print(f"Job created ({job.id})") @@ -47,6 +51,8 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: while True: await asyncio.sleep(10) detailed_job = await client.fine_tuning.jobs.get_async(job_id=job.id) + if not isinstance(detailed_job, ClassifierFineTuningJobDetails): + raise Exception(f"Unexpected job type: {type(detailed_job)}") if detailed_job.status not in [ "QUEUED", "STARTED", @@ -62,6 +68,9 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: print("Training failed") raise Exception(f"Job failed {detailed_job.status}") + if not detailed_job.fine_tuned_model: + print("No fine-tuned model returned") + return None print(f"Training succeed: {detailed_job.fine_tuned_model}") return detailed_job.fine_tuned_model diff --git a/examples/mistral/embeddings/async_embeddings.py b/examples/mistral/embeddings/async_embeddings.py index 781e87af..413769f3 100755 --- a/examples/mistral/embeddings/async_embeddings.py +++ b/examples/mistral/embeddings/async_embeddings.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/embeddings/embeddings.py b/examples/mistral/embeddings/embeddings.py index 046c87d4..64301ca0 100755 --- a/examples/mistral/embeddings/embeddings.py +++ b/examples/mistral/embeddings/embeddings.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/fim/async_code_completion.py b/examples/mistral/fim/async_code_completion.py index a6bc5717..cb6db241 100644 --- a/examples/mistral/fim/async_code_completion.py +++ 
b/examples/mistral/fim/async_code_completion.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/fim/code_completion.py b/examples/mistral/fim/code_completion.py index f3d70a68..4f25c59c 100644 --- a/examples/mistral/fim/code_completion.py +++ b/examples/mistral/fim/code_completion.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py new file mode 100644 index 00000000..d2a1679f --- /dev/null +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -0,0 +1,49 @@ +from mistralai.client import Mistral +from mistralai.client.models import BatchRequest, UserMessage +import os +import asyncio + + +async def main(): + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + requests = [BatchRequest( + custom_id=str(i), + body=dict( + model="mistral-medium-latest", + messages=[UserMessage( + content=f"What's i + {i}" + )] + ) + ) for i in range(5) + ] + + job = await client.batch.jobs.create_async( + requests=requests, + model="mistral-small-latest", + endpoint="/v1/chat/completions", + metadata={"job_type": "testing"} + ) + + print(f"Created job with ID: {job.id}") + + max_wait = 60 # 1 minute timeout for CI + elapsed = 0 + while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]: + await asyncio.sleep(1) + elapsed += 1 + if elapsed >= max_wait: + print(f"Timeout after {max_wait}s, job still {job.status}") + return + job = await client.batch.jobs.get_async(job_id=job.id) + print(f"Job status: {job.status}") + + print(f"Job is done, status {job.status}") + if job.outputs: + for res in job.outputs: + print(res["response"]["body"]) + else: + print(f"No outputs (succeeded: {job.succeeded_requests}, failed: {job.failed_requests})") 
+ +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_files.py b/examples/mistral/jobs/async_files.py index 4dc21542..4bec5237 100644 --- a/examples/mistral/jobs/async_files.py +++ b/examples/mistral/jobs/async_files.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/jobs/async_fine_tuning.py b/examples/mistral/jobs/async_fine_tuning.py new file mode 100644 index 00000000..080dbe03 --- /dev/null +++ b/examples/mistral/jobs/async_fine_tuning.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create new files + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: + training_file = await client.files.upload_async( + file=File(file_name="file.jsonl", content=f) + ) + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: + validation_file = await client.files.upload_async( + file=File(file_name="validation_file.jsonl", content=f) + ) + + # Create a new job + created_job = await client.fine_tuning.jobs.create_async( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + validation_files=[validation_file.id], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + # List jobs + jobs = await client.fine_tuning.jobs.list_async(page=0, page_size=5) + print(jobs) + + # Retrieve a job + retrieved_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) + print(retrieved_job) + + # Cancel a job + canceled_job = await 
client.fine_tuning.jobs.cancel_async(job_id=created_job.id) + print(canceled_job) + + # Delete files + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_fine_tuning_chat.py b/examples/mistral/jobs/async_fine_tuning_chat.py new file mode 100644 index 00000000..f170fed4 --- /dev/null +++ b/examples/mistral/jobs/async_fine_tuning_chat.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +import asyncio +import json +import os +import random +from pathlib import Path + +from mistralai.client import Mistral +from mistralai.client.models import ( + File, + CompletionTrainingParametersIn, +) + +POLLING_INTERVAL = 10 + +cwd = Path(__file__).parent + +user_contents = [ + "How far is the Moon from Earth?", + "What's the largest ocean on Earth?", + "How many continents are there?", + "What's the powerhouse of the cell?", + "What's the speed of light?", + "Can you solve a Rubik's Cube?", + "What is the tallest mountain in the world?", + "Who painted the Mona Lisa?", +] + +# List of assistant contents +assistant_contents = [ + "Around 384,400 kilometers. Give or take a few, like that really matters.", + "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal.", + "There are seven continents. I hope that wasn't too hard to count.", + "The mitochondria. Remember that from high school biology?", + "Approximately 299,792 kilometers per second. You know, faster than your internet speed.", + "I could if I had hands. What's your excuse?", + "Mount Everest, standing at 29,029 feet. You know, just a little hill.", + "Leonardo da Vinci. 
Just another guy who liked to doodle.", +] + +system_message = "Marv is a factual chatbot that is also sarcastic" + +def create_validation_file() -> bytes: + return json.dumps({ + "messages": [ + {"role": "user", "content": "How long does it take to travel around the Earth?"}, + {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation."} + ], + "temperature": random.random() + }).encode() + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + requests = [] + for um, am in zip( + random.sample(user_contents, len(user_contents)), + random.sample(assistant_contents, len(assistant_contents)), + ): + requests.append(json.dumps({ + "messages": [ + {"role": "system", "content": system_message}, + {"role": "user", "content": um}, + {"role": "assistant", "content": am}, + ] + })) + + # Create new files + training_file = await client.files.upload_async( + file=File( + file_name="file.jsonl", content=("\n".join(requests)).encode() + ), + purpose="fine-tune", + ) + + validation_file = await client.files.upload_async( + file=File( + file_name="validation_file.jsonl", content=create_validation_file() + ), + purpose="fine-tune", + ) + # Create a new job + created_job = await client.fine_tuning.jobs.create_async( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + validation_files=[validation_file.id], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + ), + ) + + while created_job.status in ["RUNNING", "STARTED", "QUEUED", "VALIDATING", "VALIDATED"]: + created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) + print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") + await asyncio.sleep(POLLING_INTERVAL) + + if created_job.status == "FAILED": + print("Job failed") + raise Exception(f"Job failed with {created_job.status}") + + 
print(created_job) + # Chat with model + response = await client.chat.complete_async( + model=created_job.fine_tuned_model, + messages=[ + { + "role": "system", + "content": "Marv is a factual chatbot that is also sarcastic.", + }, + {"role": "user", "content": "What is the capital of France ?"}, + ], + ) + + print(response.choices[0].message.content) + + # Delete files + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) + + # Delete fine-tuned model + await client.models.delete_async(model_id=created_job.fine_tuned_model) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_jobs.py b/examples/mistral/jobs/async_jobs.py deleted file mode 100644 index 44a58af1..00000000 --- a/examples/mistral/jobs/async_jobs.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai import Mistral -from mistralai.models import File, CompletionTrainingParametersIn - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = Mistral(api_key=api_key) - - # Create new files - with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: - training_file = await client.files.upload_async( - file=File(file_name="file.jsonl", content=f) - ) - with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: - validation_file = await client.files.upload_async( - file=File(file_name="validation_file.jsonl", content=f) - ) - - # Create a new job - created_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", - training_files=[{"file_id": training_file.id, "weight": 1}], - validation_files=[validation_file.id], - hyperparameters=CompletionTrainingParametersIn( - training_steps=1, - learning_rate=0.0001, - ), - ) - print(created_job) - - # List jobs - jobs = await client.fine_tuning.jobs.list_async(page=0, page_size=5) - print(jobs) - - # Retrieve a job - retrieved_job = await 
client.fine_tuning.jobs.get_async(job_id=created_job.id) - print(retrieved_job) - - # Cancel a job - canceled_job = await client.fine_tuning.jobs.cancel_async(job_id=created_job.id) - print(canceled_job) - - # Delete files - await client.files.delete_async(file_id=training_file.id) - await client.files.delete_async(file_id=validation_file.id) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/mistral/jobs/async_jobs_chat.py b/examples/mistral/jobs/async_jobs_chat.py deleted file mode 100644 index 80e598c7..00000000 --- a/examples/mistral/jobs/async_jobs_chat.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -import asyncio -import json -import os -import random -from pathlib import Path - -from mistralai import Mistral -from mistralai.models import ( - File, - CompletionTrainingParametersIn, -) - -POLLING_INTERVAL = 10 - -cwd = Path(__file__).parent - -user_contents = [ - "How far is the Moon from Earth?", - "What's the largest ocean on Earth?", - "How many continents are there?", - "What's the powerhouse of the cell?", - "What's the speed of light?", - "Can you solve a Rubik's Cube?", - "What is the tallest mountain in the world?", - "Who painted the Mona Lisa?", -] - -# List of assistant contents -assistant_contents = [ - "Around 384,400 kilometers. Give or take a few, like that really matters.", - "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal.", - "There are seven continents. I hope that wasn't too hard to count.", - "The mitochondria. Remember that from high school biology?", - "Approximately 299,792 kilometers per second. You know, faster than your internet speed.", - "I could if I had hands. What's your excuse?", - "Mount Everest, standing at 29,029 feet. You know, just a little hill.", - "Leonardo da Vinci. 
Just another guy who liked to doodle.", -] - -system_message = "Marv is a factual chatbot that is also sarcastic" - -def create_validation_file() -> bytes: - return json.dumps({ - "messages": [ - {"role": "user", "content": "How long does it take to travel around the Earth?"}, - {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation."} - ], - "temperature": random.random() - }).encode() - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - client = Mistral(api_key=api_key) - - requests = [] - for um, am in zip( - random.sample(user_contents, len(user_contents)), - random.sample(assistant_contents, len(assistant_contents)), - ): - requests.append(json.dumps({ - "messages": [ - {"role": "system", "content": system_message}, - {"role": "user", "content": um}, - {"role": "assistant", "content": am}, - ] - })) - - # Create new files - training_file = await client.files.upload_async( - file=File( - file_name="file.jsonl", content=("\n".join(requests)).encode() - ), - purpose="fine-tune", - ) - - validation_file = await client.files.upload_async( - file=File( - file_name="validation_file.jsonl", content=create_validation_file() - ), - purpose="fine-tune", - ) - # Create a new job - created_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", - training_files=[{"file_id": training_file.id, "weight": 1}], - validation_files=[validation_file.id], - hyperparameters=CompletionTrainingParametersIn( - training_steps=1, - learning_rate=0.0001, - ), - ) - - while created_job.status in ["RUNNING", "STARTED", "QUEUED", "VALIDATING", "VALIDATED"]: - created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) - print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") - await asyncio.sleep(POLLING_INTERVAL) - - if created_job.status == "FAILED": - print("Job failed") - raise Exception(f"Job failed with {created_job.status}") - - print(created_job) 
- # Chat with model - response = await client.chat.complete_async( - model=created_job.fine_tuned_model, - messages=[ - { - "role": "system", - "content": "Marv is a factual chatbot that is also sarcastic.", - }, - {"role": "user", "content": "What is the capital of France ?"}, - ], - ) - - print(response.choices[0].message.content) - - # Delete files - await client.files.delete_async(file_id=training_file.id) - await client.files.delete_async(file_id=validation_file.id) - - # Delete fine-tuned model - await client.models.delete_async(model_id=created_job.fine_tuned_model) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py new file mode 100644 index 00000000..f209507d --- /dev/null +++ b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +import asyncio +import json +import os +from typing import List + +import httpx +from pydantic import BaseModel, Field + +from mistralai.client import Mistral +from mistralai.extra import response_format_from_pydantic_model +from mistralai.client.models import File + +SAMPLE_PDF_URL = "https://arxiv.org/pdf/2401.04088" + + +class Table(BaseModel): + name: str = Field(description="The name or title of the table") + + +class TableExtraction(BaseModel): + tables: List[Table] = Field(description="List of tables found in the document") + + +def create_ocr_batch_request(custom_id: str, document_url: str) -> dict: + """Batch requests require custom_id and body wrapper.""" + response_format = response_format_from_pydantic_model(TableExtraction) + return { + "custom_id": custom_id, + "body": { + "document": {"type": "document_url", "document_url": document_url}, + "document_annotation_format": response_format.model_dump( + by_alias=True, exclude_none=True + ), + "pages": [0, 1, 2, 3, 4, 5, 6, 7], + "include_image_base64": 
False, + }, + } + + +async def main(): + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + document_urls = [SAMPLE_PDF_URL] + + batch_requests = [ + json.dumps(create_ocr_batch_request(custom_id=str(i), document_url=url)) + for i, url in enumerate(document_urls) + ] + batch_content = "\n".join(batch_requests) + + print("Uploading batch file...") + batch_file = await client.files.upload_async( + file=File(file_name="ocr_batch.jsonl", content=batch_content.encode()), + purpose="batch", + ) + print(f"Batch file uploaded: {batch_file.id}") + + print("Creating batch job...") + created_job = await client.batch.jobs.create_async( + model="mistral-ocr-latest", + input_files=[batch_file.id], + endpoint="/v1/ocr", + ) + print(f"Batch job created: {created_job.id}") + + print("Waiting for job completion...") + job = await client.batch.jobs.get_async(job_id=created_job.id) + while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]: + print(f"Status: {job.status}") + await asyncio.sleep(5) + job = await client.batch.jobs.get_async(job_id=created_job.id) + + print(f"Job status: {job.status}") + + async with httpx.AsyncClient() as http_client: + if job.output_file: + signed_url = await client.files.get_signed_url_async( + file_id=job.output_file + ) + response = await http_client.get(signed_url.url) + for line in response.content.decode().strip().split("\n"): + result = json.loads(line) + annotation = result["response"]["body"].get("document_annotation") + if annotation: + tables = TableExtraction.model_validate_json(annotation) + for table in tables.tables: + print(table.name) + + if job.error_file: + signed_url = await client.files.get_signed_url_async(file_id=job.error_file) + response = await http_client.get(signed_url.url) + print("Errors:", response.content.decode()) + + print("\nCleaning up...") + await client.files.delete_async(file_id=batch_file.id) + print("Done!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git 
a/examples/mistral/jobs/dry_run_job.py b/examples/mistral/jobs/dry_run_job.py deleted file mode 100644 index 84a2d0ce..00000000 --- a/examples/mistral/jobs/dry_run_job.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai import Mistral -from mistralai.models import CompletionTrainingParametersIn - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = Mistral(api_key=api_key) - - # Create new files - with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: - training_file = await client.files.upload_async( - file={"file_name": "test-file.jsonl", "content": f} - ) - - # Create a new job - dry_run_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", - training_files=[{"file_id": training_file.id, "weight": 1}], - hyperparameters=CompletionTrainingParametersIn( - training_steps=1, - learning_rate=0.0001, - warmup_fraction=0.01, - ), - auto_start=False, - ) - - print("Dry run job created") - print(f"Job ID: {dry_run_job}") - print(f"Train tokens: {dry_run_job.trained_tokens}") - print(f"Dataset tokens: {dry_run_job.metadata.data_tokens}") - print(f"Epochs number: {dry_run_job.hyperparameters.epochs}") - print(f"Expected duration: {dry_run_job.metadata.expected_duration_seconds}") - print(f"Cost: {dry_run_job.metadata.cost} {dry_run_job.metadata.cost_currency}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/mistral/jobs/files.py b/examples/mistral/jobs/files.py index 5dce880b..50f6472c 100644 --- a/examples/mistral/jobs/files.py +++ b/examples/mistral/jobs/files.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File def main(): diff --git a/examples/mistral/jobs/fine_tuning.py b/examples/mistral/jobs/fine_tuning.py new file mode 100644 index 00000000..2d155cc2 --- /dev/null +++ b/examples/mistral/jobs/fine_tuning.py @@ 
-0,0 +1,53 @@ +#!/usr/bin/env python +import os + +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create new files + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: + training_file = client.files.upload( + file=File(file_name="file.jsonl", content=f) + ) + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: + validation_file = client.files.upload( + file=File(file_name="validation_file.jsonl", content=f) + ) + + # Create a new job + created_job = client.fine_tuning.jobs.create( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + validation_files=[validation_file.id], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + # List jobs + jobs = client.fine_tuning.jobs.list(page=0, page_size=5) + print(jobs) + + # Retrieve a job + retrieved_job = client.fine_tuning.jobs.get(job_id=created_job.id) + print(retrieved_job) + + # Cancel a job + canceled_job = client.fine_tuning.jobs.cancel(job_id=created_job.id) + print(canceled_job) + + # Delete files + client.files.delete(file_id=training_file.id) + client.files.delete(file_id=validation_file.id) + + +if __name__ == "__main__": + main() diff --git a/examples/mistral/jobs/fine_tuning_dry_run.py b/examples/mistral/jobs/fine_tuning_dry_run.py new file mode 100644 index 00000000..d0c6f733 --- /dev/null +++ b/examples/mistral/jobs/fine_tuning_dry_run.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.client.models import CompletionTrainingParametersIn + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # Create new files + with open("examples/fixtures/ft_training_file.jsonl", "rb") as 
f: + training_file = await client.files.upload_async( + file={"file_name": "test-file.jsonl", "content": f} + ) + + # Create a new job + dry_run_job = await client.fine_tuning.jobs.create_async( + model="mistral-small-latest", + training_files=[{"file_id": training_file.id, "weight": 1}], + hyperparameters=CompletionTrainingParametersIn( + training_steps=1, + learning_rate=0.0001, + warmup_fraction=0.01, + ), + auto_start=False, + ) + + print("Dry run job created") + print(f"Job ID: {dry_run_job}") + print(f"Train tokens: {dry_run_job.trained_tokens}") + print(f"Dataset tokens: {dry_run_job.metadata.data_tokens}") + print(f"Epochs number: {dry_run_job.hyperparameters.epochs}") + print(f"Expected duration: {dry_run_job.metadata.expected_duration_seconds}") + print(f"Cost: {dry_run_job.metadata.cost} {dry_run_job.metadata.cost_currency}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/jobs.py b/examples/mistral/jobs/jobs.py deleted file mode 100644 index f65fda8e..00000000 --- a/examples/mistral/jobs/jobs.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -import os - -from mistralai import Mistral -from mistralai.models import File, CompletionTrainingParametersIn - - -def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = Mistral(api_key=api_key) - - # Create new files - with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: - training_file = client.files.upload( - file=File(file_name="file.jsonl", content=f) - ) - with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: - validation_file = client.files.upload( - file=File(file_name="validation_file.jsonl", content=f) - ) - - # Create a new job - created_job = client.fine_tuning.jobs.create( - model="open-mistral-7b", - training_files=[{"file_id": training_file.id, "weight": 1}], - validation_files=[validation_file.id], - hyperparameters=CompletionTrainingParametersIn( - training_steps=1, - learning_rate=0.0001, - ), - ) - 
print(created_job) - - # List jobs - jobs = client.fine_tuning.jobs.list(page=0, page_size=5) - print(jobs) - - # Retrieve a job - retrieved_job = client.fine_tuning.jobs.get(job_id=created_job.id) - print(retrieved_job) - - # Cancel a job - canceled_job = client.fine_tuning.jobs.cancel(job_id=created_job.id) - print(canceled_job) - - # Delete files - client.files.delete(file_id=training_file.id) - client.files.delete(file_id=validation_file.id) - - -if __name__ == "__main__": - main() diff --git a/examples/mistral/libraries/async_libraries.py b/examples/mistral/libraries/async_libraries.py index b2f9d4c4..fc5e6541 100644 --- a/examples/mistral/libraries/async_libraries.py +++ b/examples/mistral/libraries/async_libraries.py @@ -3,8 +3,8 @@ import os import asyncio -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/libraries/libraries.py b/examples/mistral/libraries/libraries.py index 88436540..8e4b2998 100644 --- a/examples/mistral/libraries/libraries.py +++ b/examples/mistral/libraries/libraries.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File def main(): diff --git a/examples/mistral/models/async_list_models.py b/examples/mistral/models/async_list_models.py index 4243d862..8b1ac503 100755 --- a/examples/mistral/models/async_list_models.py +++ b/examples/mistral/models/async_list_models.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/models/list_models.py b/examples/mistral/models/list_models.py index c6c0c855..9b68f806 100755 --- a/examples/mistral/models/list_models.py +++ b/examples/mistral/models/list_models.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from 
mistralai.client import Mistral def main(): diff --git a/examples/mistral/ocr/ocr_process_from_file.py b/examples/mistral/ocr/ocr_process_from_file.py index 84a7b4d8..9368ceeb 100644 --- a/examples/mistral/ocr/ocr_process_from_file.py +++ b/examples/mistral/ocr/ocr_process_from_file.py @@ -1,4 +1,4 @@ -from mistralai import Mistral +from mistralai.client import Mistral import os import json from pathlib import Path diff --git a/examples/mistral/ocr/ocr_process_from_url.py b/examples/mistral/ocr/ocr_process_from_url.py index 55f31282..4f3b0224 100644 --- a/examples/mistral/ocr/ocr_process_from_url.py +++ b/examples/mistral/ocr/ocr_process_from_url.py @@ -1,7 +1,7 @@ import json import os -from mistralai import Mistral +from mistralai.client import Mistral MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825.pdf" diff --git a/packages/azure/.genignore b/packages/azure/.genignore new file mode 100644 index 00000000..6bdf6621 --- /dev/null +++ b/packages/azure/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/azure/client/sdk.py +src/mistralai/azure/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/mistralai_azure/.gitattributes b/packages/azure/.gitattributes similarity index 100% rename from packages/mistralai_azure/.gitattributes rename to packages/azure/.gitattributes diff --git a/packages/azure/.gitignore b/packages/azure/.gitignore new file mode 100644 index 00000000..b386de74 --- /dev/null +++ b/packages/azure/.gitignore @@ -0,0 +1,15 @@ +.env +.env.local +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ +.speakeasy/reports +README-PYPI.md +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock new file mode 100644 index 00000000..5da824d1 --- /dev/null +++ b/packages/azure/.speakeasy/gen.lock @@ -0,0 +1,866 @@ 
+lockVersion: 2.0.0 +id: dc40fa48-2c4d-46ad-ac8b-270749770f34 +management: + docChecksum: 571037b8485712afcef86703debb7f15 + docVersion: 1.0.0 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0b1 + configChecksum: 01160bf17a4abd1ce038528d20cd4685 + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/azure + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure + published: true +persistentEdits: + generation_id: 2f5b7e40-9bd2-4c96-9e97-16a92e4b44af + pristine_commit_hash: 480a8b0e23da7e4752e6ad5b36fc72651e09d2d7 + pristine_tree_hash: 8a4c9b9a253fbe496a52e0496fa7e58e91e32c7c +features: + python: + additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 + constsAndDefaults: 1.0.7 + core: 6.0.12 + defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 + envVarSecurityUsage: 0.3.2 + examples: 3.0.2 + flatRequests: 1.0.1 + globalSecurity: 3.0.5 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.2.0 + includes: 3.0.0 + methodArguments: 1.0.2 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.1.4 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/arguments.md: + id: 
7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:00453565d70739471a4e1872c93b5b7e66fe6cb6 + pristine_git_object: f8715cd0a335c6dc0fda4b60400f11c4aa8a0a06 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: 
sha1:7233a19b12f3204b8e2259a4a09d0d9726609e4e + pristine_git_object: cc82a8c707268084865f86d71be82de5ebf6f821 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: 
sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 + docs/models/document.md: + id: cd1d2a444370 + last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 + pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: 
sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:eefa8ad80773e00ac297f3cf806704ac6ac3557d + pristine_git_object: 2d26c19fd1cecb234d7fb761dd73cc0a59e622ad + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/prediction.md: + id: 3c70b2262201 + 
last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb + 
docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 
8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 + scripts/prepare_readme.py: + id: e0c5957a6035 + last_write_checksum: sha1:26b29aad3c23a98912fd881698c976aac55749fe + pristine_git_object: 2b2577ea83873f64aa9f91d9d762bc6e1f250977 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/azure/client/__init__.py: + id: 5624bda9196d + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 + src/mistralai/azure/client/_hooks/__init__.py: + id: 850c237217cb + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai/azure/client/_hooks/sdkhooks.py: + id: e9923767446c + last_write_checksum: sha1:ae162d6e73be0eb767c353c815d76b034395d50f + pristine_git_object: 2080681b7f2c52fcb80dcb95eff48654763e6258 + src/mistralai/azure/client/_hooks/types.py: + id: 07c892e06527 + last_write_checksum: sha1:fde2e0f6da6930232b67682009de520724b23398 + pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 + src/mistralai/azure/client/_version.py: + id: a77160e60e5d + last_write_checksum: 
sha1:1b76e9448049c69dbdb690b9de25456378bba0a7 + pristine_git_object: 213648be87a19e24d87160c1286614b2d5df7344 + src/mistralai/azure/client/basesdk.py: + id: 5a585a95ec21 + last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 + pristine_git_object: 0d4d9a440e6c7726b6bc7fc6525aa3dc009847eb + src/mistralai/azure/client/chat.py: + id: c18454e628d7 + last_write_checksum: sha1:884e22b0e313662c67cec7101765d8d7ef0bc48a + pristine_git_object: 1051f9527851894988f7e1689923575cf72a0896 + src/mistralai/azure/client/errors/__init__.py: + id: f377703514d9 + last_write_checksum: sha1:36c516c11f8083c3380a72c1d0f0718a3345f24b + pristine_git_object: 79e2712c2e62121fb6dbaab15ca8487f0e16b07c + src/mistralai/azure/client/errors/httpvalidationerror.py: + id: c3ec0ad923e9 + last_write_checksum: sha1:f45b41c1ad980c5d481158209bf23fa795cc68bc + pristine_git_object: b4f2691e630a095ff09fbbce5e2ea3063592084f + src/mistralai/azure/client/errors/mistralazureerror.py: + id: fae868afae89 + last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c + pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + src/mistralai/azure/client/errors/no_response_error.py: + id: b838df044e62 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/azure/client/errors/responsevalidationerror.py: + id: 77ac5e93cdda + last_write_checksum: sha1:c1e045dbdda0199bc1d563819c0b38e877d0efef + pristine_git_object: 02397334d2b3bf2516808b69b2548564f650cbe0 + src/mistralai/azure/client/errors/sdkerror.py: + id: dfdd4b1d8928 + last_write_checksum: sha1:edc2baf6feb199e1b1ff1aad681622b44804299d + pristine_git_object: c4f3616cd2720a9b5d2a2c5b2d22a305629ebbe6 + src/mistralai/azure/client/httpclient.py: + id: 60c81037fbd0 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai/azure/client/models/__init__.py: + id: 
"335011330e21" + last_write_checksum: sha1:07054ca95df60a3f03d8ea37a361aa506f94b78b + pristine_git_object: 908dda32cebe894b37dccaaa9b84db174ac93c21 + src/mistralai/azure/client/models/assistantmessage.py: + id: 353ed9110f97 + last_write_checksum: sha1:973979ac03f86f26ee9a540aaaa8f70a7011daca + pristine_git_object: e9ae6e82c3c758561c8c9663f27b2fd7e38d2911 + src/mistralai/azure/client/models/chatcompletionchoice.py: + id: 6942c7db5891 + last_write_checksum: sha1:817bfda6120a98248322c308629e404081e01279 + pristine_git_object: 67b5ba694217f4f3b95589d7f84af6a9bea9802d + src/mistralai/azure/client/models/chatcompletionrequest.py: + id: 0c711c870184 + last_write_checksum: sha1:ffdd11a4945dd805c9a73328749c2f4d9b6f80e6 + pristine_git_object: edd0fdc74a1b81f458d6083e79dc393e488da36a + src/mistralai/azure/client/models/chatcompletionresponse.py: + id: bdfacf065e9e + last_write_checksum: sha1:c72fb624e7475a551d37e0b291b64bcf772c402a + pristine_git_object: d41f9c6fab670cf7c961f50b1302f9a88cf48162 + src/mistralai/azure/client/models/chatcompletionstreamrequest.py: + id: da00a7feb4ef + last_write_checksum: sha1:8bb36693fed73a50d59687ca8b30a2c443708610 + pristine_git_object: 2edfbed98462eab43f322b9c706721365e410bb9 + src/mistralai/azure/client/models/completionchunk.py: + id: 28d620f25510 + last_write_checksum: sha1:84d1c55ef7bdb438e7f536a604a070799d054281 + pristine_git_object: 0e64bbc8aa0293c9d763db56287f296909260c38 + src/mistralai/azure/client/models/completionevent.py: + id: a6f00a747933 + last_write_checksum: sha1:3d04bfbdaf11c52af5613ed0fd70c8dbc59f6d49 + pristine_git_object: c4b272871d9b3ea8443f469d29b0825706c25c00 + src/mistralai/azure/client/models/completionresponsestreamchoice.py: + id: 3ba5d7ba8a13 + last_write_checksum: sha1:4de311509c71c8f582b2c767febea89f1acd341a + pristine_git_object: 20a271401ff98d69525947ab929078af83aab1f1 + src/mistralai/azure/client/models/contentchunk.py: + id: 1f65e4f8f731 + last_write_checksum: sha1:cf11e1f061d3c8af040ebbdba0b25d4177e1cea4 
+ pristine_git_object: 17efcc7d5825461576cf61257908688cffd23eb7 + src/mistralai/azure/client/models/deltamessage.py: + id: b7dab1d158de + last_write_checksum: sha1:190c2809d575244eda5efbb1e00a4ec5811aea29 + pristine_git_object: 567e772fc1b376efaec1a2dfd660bc74a916f8ee + src/mistralai/azure/client/models/documenturlchunk.py: + id: e56fec6e977f + last_write_checksum: sha1:0313d94f343d46dac7cc3adc392feaf06fa2b2a4 + pristine_git_object: 2dea80056f6752bdaa5d00f391cb6f54371a9d2b + src/mistralai/azure/client/models/filechunk.py: + id: 150d9f180110 + last_write_checksum: sha1:6d12d630a5bfd601836f9cb3d63b9eb2f15f880d + pristine_git_object: 6baa0cba81535e157c0f81ae2648362f7bd1adbd + src/mistralai/azure/client/models/function.py: + id: 6d1e2011a14b + last_write_checksum: sha1:b064eca9256966603581d41b5b2c08cd2448224d + pristine_git_object: 055d3657fd98da63b80deb8cd2054e95a0e66a2b + src/mistralai/azure/client/models/functioncall.py: + id: ced560a1bd57 + last_write_checksum: sha1:490cb3a0305994de063e06fa4c77defa911271f3 + pristine_git_object: d476792ccbb5aa2002deb870f1c81cc1500f59d4 + src/mistralai/azure/client/models/functionname.py: + id: 6f09474ebc85 + last_write_checksum: sha1:651ceed24416ce8192f70db03cc5cd0db685899f + pristine_git_object: 839e0d557a902da6c819210962e38e1df9bda90f + src/mistralai/azure/client/models/imagedetail.py: + id: de211988043d + last_write_checksum: sha1:812f2ec4fc0d8d13db643ed49192384d5a841aa4 + pristine_git_object: 2d074cee614e1c49b69ee4073c3aaaa7a5a2c9e2 + src/mistralai/azure/client/models/imageurl.py: + id: c8882341c798 + last_write_checksum: sha1:8c3c08cc5d33c66b12539270b7edbf157d936f86 + pristine_git_object: bcb4fe43d334752be501d694543250d7e632a9c7 + src/mistralai/azure/client/models/imageurlchunk.py: + id: b6f0abb574d7 + last_write_checksum: sha1:417618d9d2aba85386a100dfe818d13342830526 + pristine_git_object: 7213c49846a4107271d017dd695648d98c2efa94 + src/mistralai/azure/client/models/jsonschema.py: + id: bfd486f4bb18 + last_write_checksum: 
sha1:ccb2b53bd2351ec5119d9a7914a1a42c2746a096 + pristine_git_object: 99f2fb8903562465687edfd300d8efd373b92247 + src/mistralai/azure/client/models/mistralpromptmode.py: + id: d0028b1e4129 + last_write_checksum: sha1:46fe1ab8ac2d5867877368a59a4aa5be2fabadeb + pristine_git_object: 26e7adbdc4a981c92d51b72542c966b0ba0fb8f8 + src/mistralai/azure/client/models/ocrimageobject.py: + id: 9c9f987d94bb + last_write_checksum: sha1:423effee97a4120a26ba78c2abe7f6adeb5c733d + pristine_git_object: a23515b346a0f9517fec0b2381e1b0c04cb31816 + src/mistralai/azure/client/models/ocrpagedimensions.py: + id: 7669a25f32b3 + last_write_checksum: sha1:60642db6bb61f0e96204fb78d3aa0bd80dd0a7e5 + pristine_git_object: 12858da92de99aa6da9d6e148df3ba7ee37496c7 + src/mistralai/azure/client/models/ocrpageobject.py: + id: eea193b05126 + last_write_checksum: sha1:b8370ac0611dc3eccf09dddf85d1c39d3a11224b + pristine_git_object: 434c8988f124f93180e6cefa15b3aee067937946 + src/mistralai/azure/client/models/ocrrequest.py: + id: 365a5b4776a2 + last_write_checksum: sha1:e684da1b6db18cb9c5ce95b9cc58556e05a9ea9b + pristine_git_object: a2cd341593c9db3644076d39352abca6815efc56 + src/mistralai/azure/client/models/ocrresponse.py: + id: b8cde8c16a4c + last_write_checksum: sha1:55e81631f6fe57aaf58178460e1c5fc69fa19377 + pristine_git_object: 3dc09fd770a064e69e84519bd0f0c9127ebd8176 + src/mistralai/azure/client/models/ocrtableobject.py: + id: c2cd51b8789e + last_write_checksum: sha1:86a8fd2241cf6a636e81e58484a90bdb7880085e + pristine_git_object: f1de5428a71f9d42cd9f9e764d0bbf88f3aad8cc + src/mistralai/azure/client/models/ocrusageinfo.py: + id: 5e9118cac468 + last_write_checksum: sha1:97887b58cfe6ebd9ebd5905c6c7485525d6dc788 + pristine_git_object: f63315d23a1659aee4333b45c4239861aa5220d7 + src/mistralai/azure/client/models/prediction.py: + id: bd6abfa93083 + last_write_checksum: sha1:07d06d5629af183f999e043744a67868ef779bcc + pristine_git_object: 1fa1d78248628ccdc102ce0631d344150addfd2d + 
src/mistralai/azure/client/models/referencechunk.py: + id: c9612f854670 + last_write_checksum: sha1:e81e758e00db915e68f58ffa1e03b2c473f64477 + pristine_git_object: f7af9bf9a73e0d782e5e6c6a7866af6fbc3668d8 + src/mistralai/azure/client/models/responseformat.py: + id: c124e7c316aa + last_write_checksum: sha1:d368a2d4320356b6daab1dd0c62c6c862e902ca0 + pristine_git_object: 20fd2b868506cff278d1d7dc719eddd56ea538b0 + src/mistralai/azure/client/models/responseformats.py: + id: fef416cefcd4 + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai/azure/client/models/security.py: + id: 4a2e4760ec08 + last_write_checksum: sha1:0cd2ae54cecd88cfd8d43e92c0d3da7efa48942c + pristine_git_object: 9b83ba98336090bed89fbeda40b4a07b212a1106 + src/mistralai/azure/client/models/systemmessage.py: + id: 8fa0dee9e4e1 + last_write_checksum: sha1:26167db704ece6ef1391d6f474e00f417bff4639 + pristine_git_object: d4bd004476ef653798295fa5df9de68b607f0132 + src/mistralai/azure/client/models/systemmessagecontentchunks.py: + id: 5918e770869d + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b + src/mistralai/azure/client/models/textchunk.py: + id: 9c81c76a6325 + last_write_checksum: sha1:28b8f4e030d365e5bf2f2f2720a7919b29616564 + pristine_git_object: 9295148588a143278ff5f48f9142347e35cfdab2 + src/mistralai/azure/client/models/thinkchunk.py: + id: df6bbd55b3eb + last_write_checksum: sha1:752a81be169fdd7a6afc293cf090b2cd4d2b22c9 + pristine_git_object: 4e881aad3b11d43aecaab922fe55bf7b4076c42f + src/mistralai/azure/client/models/tool.py: + id: 4075ef72c086 + last_write_checksum: sha1:4bef6d64b6426fdeff5031557c3c0e37f5c33b9a + pristine_git_object: 87329bdb73526120a3f63d48299114485a7fe038 + src/mistralai/azure/client/models/toolcall.py: + id: c65e6f79e539 + last_write_checksum: sha1:a3b36214b4533b79868630348762206a0e5ca26e + 
pristine_git_object: ada1ea65136fa58dce55f2857d895ea916bcd41f + src/mistralai/azure/client/models/toolchoice.py: + id: c25062b5de34 + last_write_checksum: sha1:6212c9366eb3b4f4062c86c00d4502dd03bf5ce1 + pristine_git_object: ddb9e1417c880c44a7f0505bfde839570fa3cd4a + src/mistralai/azure/client/models/toolchoiceenum.py: + id: cc06ba3a8d21 + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai/azure/client/models/toolmessage.py: + id: 84ac736fa955 + last_write_checksum: sha1:e4ed14906985fe74fd76a9adb09125ebc1218a1f + pristine_git_object: 670210de0d05b52ee9dffbbb808a87e67c2d37a9 + src/mistralai/azure/client/models/tooltypes.py: + id: fa881b046d34 + last_write_checksum: sha1:cd28ddc02fff9a5abbb59c82fe9e0dcbdb9b6d2a + pristine_git_object: 1cce7446f2772b998208ea1c78c7969e3881d5d0 + src/mistralai/azure/client/models/usageinfo.py: + id: 3edc9c81b329 + last_write_checksum: sha1:0ac2350e4efa1ed3ffd7d33ac91c3ef564d1d773 + pristine_git_object: 0f04c87c97ff3148106408a46618c848b86c4b37 + src/mistralai/azure/client/models/usermessage.py: + id: 3796508adc07 + last_write_checksum: sha1:8eb35fb07971d74cf2cb0858c037558f52df6aa9 + pristine_git_object: 549b01ca887651a95c5efc8aff3372d32dfdc277 + src/mistralai/azure/client/models/validationerror.py: + id: f2b84813e2ea + last_write_checksum: sha1:f0f9706a5af2ac4f6b234e768fdd492bbdd8a18c + pristine_git_object: 817ecf7a56470369ccacd0f5e0bb739656a5f92c + src/mistralai/azure/client/ocr.py: + id: 5817c10c9297 + last_write_checksum: sha1:7666ca9f4596cee080952b2f4096bd4176051680 + pristine_git_object: b9270f6a52406d8a9bf02d90c24ae540da6dfb9d + src/mistralai/azure/client/py.typed: + id: e88369f116d2 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/azure/client/sdkconfiguration.py: + id: 602f74633eed + last_write_checksum: 
sha1:163fe779949725d81181f39b70d6922fc2cb8099 + pristine_git_object: 919225f9bf2e4315f879f0da6c7f8b3e6157bd58 + src/mistralai/azure/client/types/__init__.py: + id: f79033f78412 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai/azure/client/types/basemodel.py: + id: fd244927c80c + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai/azure/client/utils/__init__.py: + id: 26f1a707325b + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 + src/mistralai/azure/client/utils/annotations.py: + id: bb1f6c189fdb + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai/azure/client/utils/datetimes.py: + id: 2b7db09ee0ab + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/azure/client/utils/dynamic_imports.py: + id: 0ac779c122d9 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 + src/mistralai/azure/client/utils/enums.py: + id: ffbdb1917a68 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai/azure/client/utils/eventstreaming.py: + id: bdc37b70360c + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b + src/mistralai/azure/client/utils/forms.py: + id: 51696122c557 + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d + src/mistralai/azure/client/utils/headers.py: + id: e42840c8cb13 + 
last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai/azure/client/utils/logger.py: + id: 9db88755a137 + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai/azure/client/utils/metadata.py: + id: 44f85bd3b2e2 + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai/azure/client/utils/queryparams.py: + id: ec1c03114156 + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai/azure/client/utils/requestbodies.py: + id: 1030c47d624d + last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai/azure/client/utils/retries.py: + id: d50ed6e400b2 + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e + src/mistralai/azure/client/utils/security.py: + id: 1d35741ce5f1 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed + src/mistralai/azure/client/utils/serializers.py: + id: a1f26d73c3ad + last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/azure/client/utils/unions.py: + id: 9abcc9913e3f + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e + src/mistralai/azure/client/utils/unmarshal_json_response.py: + id: 947f4fc4db62 + last_write_checksum: sha1:75931131ff498a66a48cfb32dd9d5d61f2c9b4d1 + pristine_git_object: fe0c9b8ecabf8f89e363a050837582df40d67fb4 + src/mistralai/azure/client/utils/url.py: + 
id: 4976c88d0e3b + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai/azure/client/utils/values.py: + id: 3974a1553447 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "azureai", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "azureai", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} + responses: + "200": + application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + 
application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} +examplesVersion: 1.0.2 +generatedTests: {} +generatedFiles: + - .gitattributes + - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/document.md + - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md + - 
docs/models/filechunk.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/jsonschema.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/mistralpromptmode.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrusageinfo.md + - docs/models/prediction.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md + - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - py.typed + - pylintrc + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai_azure/__init__.py + - src/mistralai_azure/_hooks/__init__.py + - src/mistralai_azure/_hooks/sdkhooks.py + - src/mistralai_azure/_hooks/types.py + - src/mistralai_azure/_version.py + - src/mistralai_azure/basesdk.py + - src/mistralai_azure/chat.py + - src/mistralai_azure/httpclient.py + - 
src/mistralai_azure/models/__init__.py + - src/mistralai_azure/models/assistantmessage.py + - src/mistralai_azure/models/chatcompletionchoice.py + - src/mistralai_azure/models/chatcompletionrequest.py + - src/mistralai_azure/models/chatcompletionresponse.py + - src/mistralai_azure/models/chatcompletionstreamrequest.py + - src/mistralai_azure/models/completionchunk.py + - src/mistralai_azure/models/completionevent.py + - src/mistralai_azure/models/completionresponsestreamchoice.py + - src/mistralai_azure/models/contentchunk.py + - src/mistralai_azure/models/deltamessage.py + - src/mistralai_azure/models/documenturlchunk.py + - src/mistralai_azure/models/filechunk.py + - src/mistralai_azure/models/function.py + - src/mistralai_azure/models/functioncall.py + - src/mistralai_azure/models/functionname.py + - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/imageurl.py + - src/mistralai_azure/models/imageurlchunk.py + - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/mistralazureerror.py + - src/mistralai_azure/models/mistralpromptmode.py + - src/mistralai_azure/models/no_response_error.py + - src/mistralai_azure/models/ocrimageobject.py + - src/mistralai_azure/models/ocrpagedimensions.py + - src/mistralai_azure/models/ocrpageobject.py + - src/mistralai_azure/models/ocrrequest.py + - src/mistralai_azure/models/ocrresponse.py + - src/mistralai_azure/models/ocrusageinfo.py + - src/mistralai_azure/models/prediction.py + - src/mistralai_azure/models/referencechunk.py + - src/mistralai_azure/models/responseformat.py + - src/mistralai_azure/models/responseformats.py + - src/mistralai_azure/models/responsevalidationerror.py + - src/mistralai_azure/models/sdkerror.py + - src/mistralai_azure/models/security.py + - src/mistralai_azure/models/systemmessage.py + - src/mistralai_azure/models/systemmessagecontentchunks.py + - src/mistralai_azure/models/textchunk.py + - src/mistralai_azure/models/thinkchunk.py + - 
src/mistralai_azure/models/tool.py + - src/mistralai_azure/models/toolcall.py + - src/mistralai_azure/models/toolchoice.py + - src/mistralai_azure/models/toolchoiceenum.py + - src/mistralai_azure/models/toolmessage.py + - src/mistralai_azure/models/tooltypes.py + - src/mistralai_azure/models/usageinfo.py + - src/mistralai_azure/models/usermessage.py + - src/mistralai_azure/models/validationerror.py + - src/mistralai_azure/ocr.py + - src/mistralai_azure/py.typed + - src/mistralai_azure/sdkconfiguration.py + - src/mistralai_azure/types/__init__.py + - src/mistralai_azure/types/basemodel.py + - src/mistralai_azure/utils/__init__.py + - src/mistralai_azure/utils/annotations.py + - src/mistralai_azure/utils/datetimes.py + - src/mistralai_azure/utils/enums.py + - src/mistralai_azure/utils/eventstreaming.py + - src/mistralai_azure/utils/forms.py + - src/mistralai_azure/utils/headers.py + - src/mistralai_azure/utils/logger.py + - src/mistralai_azure/utils/metadata.py + - src/mistralai_azure/utils/queryparams.py + - src/mistralai_azure/utils/requestbodies.py + - src/mistralai_azure/utils/retries.py + - src/mistralai_azure/utils/security.py + - src/mistralai_azure/utils/serializers.py + - src/mistralai_azure/utils/unmarshal_json_response.py + - src/mistralai_azure/utils/url.py + - src/mistralai_azure/utils/values.py diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml new file mode 100644 index 00000000..55934cc8 --- /dev/null +++ b/packages/azure/.speakeasy/gen.yaml @@ -0,0 +1,88 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralAzure + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + sdkInitStyle: constructor + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + nameResolutionFeb2025: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + securityFeb2025: true + sharedErrorComponentsApr2025: true + sharedNestedComponentsJan2026: true + 
nameOverrideFeb2026: true + methodSignaturesApr2024: true + auth: + oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + versioningStrategy: automatic + persistentEdits: {} + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false +python: + version: 2.0.0b1 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: {} + allowedRedefinedBuiltins: + - id + - object + - input + - dir + asyncMode: both + authors: + - Mistral + baseErrorName: MistralAzureError + clientServerStatusCodesAsErrors: true + constFieldCasing: normal + defaultErrorName: SDKError + description: Python Client SDK for the Mistral AI API in Azure. + enableCustomCodeRegions: false + enumFormat: union + fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true + responseRequiredSep2024: true + flatAdditionalProperties: true + flattenGlobalSecurity: true + flattenRequests: true + flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only + imports: + option: openapi + paths: + callbacks: "" + errors: errors + operations: "" + shared: "" + webhooks: "" + inferUnionDiscriminators: true + inputModelSuffix: input + license: "" + maxMethodParams: 999 + methodArguments: infer-optional-args + moduleName: mistralai.azure.client + multipartArrayFormat: standard + outputModelSuffix: output + packageManager: uv + packageName: mistralai-azure + preApplyUnionDiscriminators: true + pytestFilterWarnings: [] + pytestTimeout: 0 + responseFormat: flat + sseFlatResponse: false + templateVersion: v2 + useAsyncHooks: false diff --git a/packages/mistralai_azure/CONTRIBUTING.md b/packages/azure/CONTRIBUTING.md similarity index 100% rename from packages/mistralai_azure/CONTRIBUTING.md rename to packages/azure/CONTRIBUTING.md diff --git 
a/packages/azure/README.md b/packages/azure/README.md new file mode 100644 index 00000000..6eff040f --- /dev/null +++ b/packages/azure/README.md @@ -0,0 +1,464 @@ +# Mistral on Azure Python Client + +## SDK Installation + +PIP +```bash +pip install mistralai-azure +``` + +UV +```bash +uv add mistralai-azure +``` + +**Prerequisites** + +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). + + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically injects the `api-version` query parameter. + +```python +# Synchronous Example +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +async def main(): + # The SDK automatically injects api-version as a query parameter + s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + res = await s.chat.complete_async( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + ) + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + + + +## Available Resources and Operations + +### [chat](docs/sdks/chat/README.md) + +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. 
+ +```python +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. 
+ +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + retries=RetryConfig( + "backoff", + BackoffStrategy(1, 50, 1.1, 100), + False + ), +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ["AZURE_API_VERSION"] + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai.azure.client import MistralAzure +from mistralai.azure.client import models +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = None +try: + res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + ) + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Override Server URL Per-Client + +For Azure, you must provide your Azure AI Foundry endpoint via `server_url`. 
The SDK automatically injects the `api-version` query parameter: +```python +from mistralai.azure.client import MistralAzure +import os + +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +from mistralai.azure.client import MistralAzure +import httpx +import os + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), + client=http_client, +) +``` + +or you could wrap the client with your own custom logic: +```python +from typing import Any, Optional, Union +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, 
+ content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = MistralAzure( + api_key="", + server_url="", + async_client=CustomClient(httpx.AsyncClient()), +) +``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | +| --------- | ---- | ----------- | +| `api_key` | http | HTTP Bearer | + +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. You must also provide `server_url` pointing to your Azure AI Foundry endpoint. The SDK automatically injects the `api-version` query parameter: +```python +from mistralai.azure.client import MistralAzure +import os + +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/azure/RELEASES.md b/packages/azure/RELEASES.md new file mode 100644 index 00000000..e625ee98 --- /dev/null +++ b/packages/azure/RELEASES.md @@ -0,0 +1,21 @@ + + +## 2026-01-12 15:30:32 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] packages/mistralai_azure +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure + +## 2026-02-25 17:39:51 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/azure +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure \ No newline at end of file diff --git a/packages/azure/USAGE.md b/packages/azure/USAGE.md new file mode 100644 index 00000000..a4bc5147 --- /dev/null +++ b/packages/azure/USAGE.md @@ -0,0 +1,70 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically injects the `api-version` query parameter. + +```python +# Synchronous Example +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +async def main(): + # The SDK automatically injects api-version as a query parameter + s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model=AZURE_MODEL) + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md b/packages/azure/docs/errors/httpvalidationerror.md similarity index 100% rename from packages/mistralai_azure/docs/models/httpvalidationerror.md rename to packages/azure/docs/errors/httpvalidationerror.md diff --git a/packages/mistralai_azure/docs/models/arguments.md b/packages/azure/docs/models/arguments.md similarity index 100% rename from packages/mistralai_azure/docs/models/arguments.md rename to packages/azure/docs/models/arguments.md diff --git a/packages/azure/docs/models/assistantmessage.md b/packages/azure/docs/models/assistantmessage.md new file mode 100644 index 00000000..9ef63837 --- /dev/null +++ b/packages/azure/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagecontent.md b/packages/azure/docs/models/assistantmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/assistantmessagecontent.md rename to packages/azure/docs/models/assistantmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/azure/docs/models/chatcompletionchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionchoice.md rename to packages/azure/docs/models/chatcompletionchoice.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/azure/docs/models/chatcompletionchoicefinishreason.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md rename to packages/azure/docs/models/chatcompletionchoicefinishreason.md diff --git a/packages/azure/docs/models/chatcompletionrequest.md b/packages/azure/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..f8715cd0 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequest.md @@ -0,0 +1,26 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The 
ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionrequestmessage.md b/packages/azure/docs/models/chatcompletionrequestmessage.md new file mode 100644 index 00000000..91e9e062 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md b/packages/azure/docs/models/chatcompletionrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md rename to packages/azure/docs/models/chatcompletionrequeststop.md diff --git a/packages/azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..dc82a8ef --- /dev/null +++ b/packages/azure/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionresponse.md b/packages/azure/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..a0465ffb --- /dev/null +++ b/packages/azure/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionstreamrequest.md b/packages/azure/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..cc82a8c7 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,26 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | 
Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/azure/docs/models/chatcompletionstreamrequestmessage.md b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md new file mode 100644 index 00000000..2e4e93ac --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionstreamrequeststop.md b/packages/azure/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 00000000..a48460a9 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..43f3ca38 --- /dev/null +++ b/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/azure/docs/models/completionchunk.md b/packages/azure/docs/models/completionchunk.md new file mode 100644 index 00000000..7f8ab5e6 --- /dev/null +++ b/packages/azure/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | 
+| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionevent.md b/packages/azure/docs/models/completionevent.md similarity index 100% rename from packages/mistralai_azure/docs/models/completionevent.md rename to packages/azure/docs/models/completionevent.md diff --git a/packages/azure/docs/models/completionresponsestreamchoice.md b/packages/azure/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/azure/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md new file 
mode 100644 index 00000000..0fece473 --- /dev/null +++ b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/azure/docs/models/contentchunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/contentchunk.md rename to packages/azure/docs/models/contentchunk.md diff --git a/packages/azure/docs/models/deltamessage.md b/packages/azure/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/azure/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/deltamessagecontent.md b/packages/azure/docs/models/deltamessagecontent.md new file mode 100644 index 00000000..8142772d --- /dev/null +++ b/packages/azure/docs/models/deltamessagecontent.md @@ -0,0 +1,17 @@ +# DeltaMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: 
List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/azure/docs/models/document.md b/packages/azure/docs/models/document.md new file mode 100644 index 00000000..509d43b7 --- /dev/null +++ b/packages/azure/docs/models/document.md @@ -0,0 +1,25 @@ +# Document + +Document to run OCR on + + +## Supported Types + +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/packages/azure/docs/models/documenturlchunk.md b/packages/azure/docs/models/documenturlchunk.md new file mode 100644 index 00000000..9dbfbe50 --- /dev/null +++ b/packages/azure/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/packages/azure/docs/models/filechunk.md b/packages/azure/docs/models/filechunk.md new file mode 100644 index 00000000..18217114 --- /dev/null +++ b/packages/azure/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/format_.md 
b/packages/azure/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/packages/azure/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/packages/azure/docs/models/function.md b/packages/azure/docs/models/function.md new file mode 100644 index 00000000..b2bdb3fe --- /dev/null +++ b/packages/azure/docs/models/function.md @@ -0,0 +1,11 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/functioncall.md b/packages/azure/docs/models/functioncall.md similarity index 100% rename from packages/mistralai_azure/docs/models/functioncall.md rename to packages/azure/docs/models/functioncall.md diff --git a/packages/mistralai_azure/docs/models/functionname.md b/packages/azure/docs/models/functionname.md similarity index 100% rename from packages/mistralai_azure/docs/models/functionname.md rename to packages/azure/docs/models/functionname.md diff --git a/packages/azure/docs/models/imagedetail.md b/packages/azure/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/azure/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurl.md b/packages/azure/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- 
/dev/null +++ b/packages/azure/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurlchunk.md b/packages/azure/docs/models/imageurlchunk.md new file mode 100644 index 00000000..db0c53d2 --- /dev/null +++ b/packages/azure/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurlunion.md b/packages/azure/docs/models/imageurlunion.md new file mode 100644 index 00000000..db97130f --- /dev/null +++ b/packages/azure/docs/models/imageurlunion.md @@ -0,0 +1,17 @@ +# ImageURLUnion + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/azure/docs/models/jsonschema.md b/packages/azure/docs/models/jsonschema.md new file mode 100644 index 00000000..7ff7c070 --- /dev/null +++ 
b/packages/azure/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/loc.md b/packages/azure/docs/models/loc.md similarity index 100% rename from packages/mistralai_azure/docs/models/loc.md rename to packages/azure/docs/models/loc.md diff --git a/packages/azure/docs/models/mistralpromptmode.md b/packages/azure/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..c3409d03 --- /dev/null +++ b/packages/azure/docs/models/mistralpromptmode.md @@ -0,0 +1,12 @@ +# MistralPromptMode + +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+ + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrimageobject.md b/packages/azure/docs/models/ocrimageobject.md new file mode 100644 index 00000000..3c0d5544 --- /dev/null +++ b/packages/azure/docs/models/ocrimageobject.md @@ -0,0 +1,14 @@ +# OCRImageObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Image ID for extracted image in a page | +| `top_left_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of top-left corner of the extracted image | +| `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | +| `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | +| `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | +| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrpagedimensions.md b/packages/azure/docs/models/ocrpagedimensions.md new file mode 100644 index 00000000..c93ca64d --- /dev/null +++ b/packages/azure/docs/models/ocrpagedimensions.md @@ -0,0 +1,10 @@ +# OCRPageDimensions + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `dpi` | *int* | :heavy_check_mark: | Dots per 
inch of the page-image | +| `height` | *int* | :heavy_check_mark: | Height of the image in pixels | +| `width` | *int* | :heavy_check_mark: | Width of the image in pixels | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrpageobject.md b/packages/azure/docs/models/ocrpageobject.md new file mode 100644 index 00000000..02473d44 --- /dev/null +++ b/packages/azure/docs/models/ocrpageobject.md @@ -0,0 +1,15 @@ +# OCRPageObject + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | +| `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | +| `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | +| `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrrequest.md b/packages/azure/docs/models/ocrrequest.md new file mode 100644 index 00000000..2d26c19f --- /dev/null +++ b/packages/azure/docs/models/ocrrequest.md @@ -0,0 +1,20 @@ +# OCRRequest + + +## Fields + +| Field | Type | Required | Description | 
Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrresponse.md b/packages/azure/docs/models/ocrresponse.md new file mode 100644 index 00000000..0a309317 --- /dev/null +++ b/packages/azure/docs/models/ocrresponse.md @@ -0,0 +1,11 @@ +# OCRResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrtableobject.md b/packages/azure/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/packages/azure/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/packages/azure/docs/models/ocrusageinfo.md b/packages/azure/docs/models/ocrusageinfo.md new file mode 100644 index 00000000..d9d79125 --- /dev/null +++ b/packages/azure/docs/models/ocrusageinfo.md @@ -0,0 +1,9 @@ +# OCRUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `pages_processed` | *int* | :heavy_check_mark: | Number of pages processed | +| `doc_size_bytes` | *OptionalNullable[int]* | :heavy_minus_sign: | Document size in bytes | \ No newline at end of file diff --git a/packages/azure/docs/models/prediction.md b/packages/azure/docs/models/prediction.md new file mode 100644 index 00000000..fae3c1ca --- /dev/null +++ b/packages/azure/docs/models/prediction.md @@ -0,0 +1,11 @@ +# Prediction + +Enable users to specify an expected completion, optimizing response times by leveraging known or 
predictable content. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/referencechunk.md b/packages/azure/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/azure/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/responseformat.md b/packages/azure/docs/models/responseformat.md new file mode 100644 index 00000000..5cab22f2 --- /dev/null +++ b/packages/azure/docs/models/responseformat.md @@ -0,0 +1,11 @@ +# ResponseFormat + +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/responseformats.md b/packages/azure/docs/models/responseformats.md new file mode 100644 index 00000000..2f5f1e55 --- /dev/null +++ b/packages/azure/docs/models/responseformats.md @@ -0,0 +1,10 @@ +# ResponseFormats + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/security.md b/packages/azure/docs/models/security.md similarity index 100% rename from packages/mistralai_azure/docs/models/security.md rename to packages/azure/docs/models/security.md diff --git a/packages/azure/docs/models/systemmessage.md b/packages/azure/docs/models/systemmessage.md new file mode 100644 index 00000000..10bda10f --- /dev/null +++ b/packages/azure/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ 
No newline at end of file diff --git a/packages/azure/docs/models/systemmessagecontent.md b/packages/azure/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..0c87baf3 --- /dev/null +++ b/packages/azure/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.SystemMessageContentChunks]` + +```python +value: List[models.SystemMessageContentChunks] = /* values here */ +``` + diff --git a/packages/azure/docs/models/systemmessagecontentchunks.md b/packages/azure/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/azure/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/azure/docs/models/tableformat.md b/packages/azure/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/packages/azure/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/packages/azure/docs/models/textchunk.md b/packages/azure/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/azure/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/thinkchunk.md 
b/packages/azure/docs/models/thinkchunk.md new file mode 100644 index 00000000..b07f598e --- /dev/null +++ b/packages/azure/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/docs/models/thinking.md b/packages/azure/docs/models/thinking.md similarity index 100% rename from docs/models/thinking.md rename to packages/azure/docs/models/thinking.md diff --git a/packages/azure/docs/models/tool.md b/packages/azure/docs/models/tool.md new file mode 100644 index 00000000..fb661f72 --- /dev/null +++ b/packages/azure/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/toolcall.md b/packages/azure/docs/models/toolcall.md new file mode 100644 index 00000000..3819236b --- /dev/null +++ b/packages/azure/docs/models/toolcall.md @@ -0,0 +1,11 @@ +# 
ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/toolchoice.md b/packages/azure/docs/models/toolchoice.md new file mode 100644 index 00000000..373046bb --- /dev/null +++ b/packages/azure/docs/models/toolchoice.md @@ -0,0 +1,11 @@ +# ToolChoice + +ToolChoice is either a ToolChoiceEnum or a ToolChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoiceenum.md b/packages/azure/docs/models/toolchoiceenum.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolchoiceenum.md rename to packages/azure/docs/models/toolchoiceenum.md diff --git a/packages/azure/docs/models/toolmessage.md b/packages/azure/docs/models/toolmessage.md new file mode 100644 index 00000000..7201481e --- /dev/null 
+++ b/packages/azure/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagecontent.md b/packages/azure/docs/models/toolmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolmessagecontent.md rename to packages/azure/docs/models/toolmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/tooltypes.md b/packages/azure/docs/models/tooltypes.md similarity index 100% rename from packages/mistralai_azure/docs/models/tooltypes.md rename to packages/azure/docs/models/tooltypes.md diff --git a/packages/azure/docs/models/usageinfo.md b/packages/azure/docs/models/usageinfo.md new file mode 100644 index 00000000..f5204ac9 --- /dev/null +++ b/packages/azure/docs/models/usageinfo.md @@ -0,0 +1,12 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | 
Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/usermessage.md b/packages/azure/docs/models/usermessage.md new file mode 100644 index 00000000..e7a932ed --- /dev/null +++ b/packages/azure/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagecontent.md b/packages/azure/docs/models/usermessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/usermessagecontent.md rename to packages/azure/docs/models/usermessagecontent.md diff --git a/packages/mistralai_azure/docs/models/utils/retryconfig.md b/packages/azure/docs/models/utils/retryconfig.md similarity index 100% rename from packages/mistralai_azure/docs/models/utils/retryconfig.md rename to packages/azure/docs/models/utils/retryconfig.md diff --git a/packages/mistralai_azure/docs/models/validationerror.md b/packages/azure/docs/models/validationerror.md similarity index 100% rename from packages/mistralai_azure/docs/models/validationerror.md rename to packages/azure/docs/models/validationerror.md diff --git a/packages/azure/docs/sdks/chat/README.md b/packages/azure/docs/sdks/chat/README.md new file mode 100644 index 00000000..560ffa83 --- /dev/null +++ b/packages/azure/docs/sdks/chat/README.md @@ -0,0 +1,141 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. 
+ +### Available Operations + +* [stream](#stream) - Stream chat completion +* [complete](#complete) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. 
| {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## complete + +Chat Completion + +### Example Usage + +```python +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + # handle response + print(res.choices[0].message.content) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_azure/docs/sdks/mistralazure/README.md b/packages/azure/docs/sdks/mistralazure/README.md similarity index 100% rename from packages/mistralai_azure/docs/sdks/mistralazure/README.md rename to packages/azure/docs/sdks/mistralazure/README.md diff --git a/packages/mistralai_azure/py.typed b/packages/azure/py.typed similarity index 100% rename from packages/mistralai_azure/py.typed rename to packages/azure/py.typed diff --git a/packages/azure/pylintrc b/packages/azure/pylintrc new file mode 100644 index 00000000..0391ac11 --- /dev/null +++ b/packages/azure/pylintrc @@ -0,0 +1,664 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. 
+clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. 
+# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. 
+unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. 
If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id, + n + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. 
Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. 
+max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). 
+ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import, + too-many-return-statements, + redefined-builtin + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object,input,dir + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/azure/pyproject.toml b/packages/azure/pyproject.toml new file mode 100644 index 00000000..cf80bde8 --- /dev/null +++ b/packages/azure/pyproject.toml @@ -0,0 +1,65 @@ +[project] +name = "mistralai-azure" +version = "2.0.0b1" +description = "Python Client SDK for the Mistral AI API in Azure." +authors = [{ name = "Mistral" }] +requires-python = ">=3.10" +readme = "README.md" +dependencies = [ + "httpcore >=1.0.9", + "httpx >=0.28.1", + "pydantic >=2.11.2", +] + +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", + "pyright>=1.1.401,<2", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", +] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/azure/client/py.typed"] + +[tool.hatch.build.targets.sdist] +include = ["src/mistralai"] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/azure/client/py.typed" = "src/mistralai/azure/client/py.typed" + +[tool.hatch.build.targets.wheel] +include = ["src/mistralai"] + +[tool.hatch.build.targets.wheel.sources] +"src" = "" + +[virtualenvs] +in-project = true + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true 
+ +[tool.pyright] +venvPath = "." +venv = ".venv" diff --git a/packages/azure/scripts/prepare_readme.py b/packages/azure/scripts/prepare_readme.py new file mode 100644 index 00000000..2b2577ea --- /dev/null +++ b/packages/azure/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/azure" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/azure/scripts/publish.sh b/packages/azure/scripts/publish.sh new file mode 100755 index 00000000..c35748f3 --- /dev/null +++ b/packages/azure/scripts/publish.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff --git a/packages/azure/src/mistralai/azure/client/__init__.py b/packages/azure/src/mistralai/azure/client/__init__.py new file mode 100644 index 00000000..833c68cd --- /dev/null +++ 
b/packages/azure/src/mistralai/azure/client/__init__.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py b/packages/azure/src/mistralai/azure/client/_hooks/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py rename to packages/azure/src/mistralai/azure/client/_hooks/__init__.py diff --git a/packages/azure/src/mistralai/azure/client/_hooks/registration.py b/packages/azure/src/mistralai/azure/client/_hooks/registration.py new file mode 100644 index 00000000..d5a49cc3 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/registration.py @@ -0,0 +1,12 @@ +from .types import Hooks + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. 
+ + +def init_hooks(_hooks: Hooks) -> None: + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" diff --git a/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..2080681b --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.azure.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, 
hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/azure/src/mistralai/azure/client/_hooks/types.py b/packages/azure/src/mistralai/azure/client/_hooks/types.py new file mode 100644 index 00000000..3e4e3955 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_hooks/types.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from abc import ABC, abstractmethod +import httpx +from mistralai.azure.client.httpclient import HttpClient +from mistralai.azure.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> 
Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py new file mode 100644 index 00000000..213648be --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai-azure" +__version__: str = "2.0.0b1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-azure" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/azure/src/mistralai/azure/client/basesdk.py b/packages/azure/src/mistralai/azure/client/basesdk.py new file mode 100644 index 00000000..0d4d9a44 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/basesdk.py @@ -0,0 +1,384 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai.azure.client import errors, utils +from mistralai.azure.client._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) +from mistralai.azure.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse + + +class BaseSDK: + sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. + """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + 
accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if 
    def do_request(
        self,
        hook_ctx,
        request,
        error_status_codes,
        stream=False,
        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
    ) -> httpx.Response:
        """Send a built httpx request through the SDK hook pipeline, synchronously.

        :param hook_ctx: HookContext identifying the operation for the hooks.
        :param request: the ``httpx.Request`` produced by ``_build_request``.
        :param error_status_codes: status-code patterns (e.g. ``"422"``, ``"4XX"``)
            that are treated as error responses.
        :param stream: when True the response body is left unread so the caller
            can consume it incrementally (e.g. server-sent events).
        :param retry_config: optional ``(RetryConfig, [status-code patterns])``
            pair; when given, the whole attempt is wrapped in ``utils.retry``.
        :return: the (possibly hook-transformed) ``httpx.Response``.
        :raises errors.NoResponseError: if no response was produced and the
            after_error hooks swallowed the transport exception.
        :raises errors.SDKError: for error responses the hooks did not replace.
        """
        client = self.sdk_configuration.client
        logger = self.sdk_configuration.debug_logger

        # Hooks are read via __dict__ to bypass attribute machinery on the
        # configuration object.
        hooks = self.sdk_configuration.__dict__["_hooks"]

        def do():
            # One attempt: before_request hooks, send, then route any transport
            # exception through after_error (which may swallow or replace it).
            http_res = None
            try:
                req = hooks.before_request(BeforeRequestContext(hook_ctx), request)
                logger.debug(
                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                    req.method,
                    req.url,
                    req.headers,
                    get_body_content(req),
                )

                if client is None:
                    raise ValueError("client is required")

                http_res = client.send(req, stream=stream)
            except Exception as e:
                # after_error may transform the exception; re-raise only if one
                # is still present after the hook ran.
                _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e)
                if e is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise e

            if http_res is None:
                # Exception was swallowed by a hook but no response substituted.
                logger.debug("Raising no response SDK error")
                raise errors.NoResponseError("No response received")

            logger.debug(
                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
                http_res.status_code,
                http_res.url,
                http_res.headers,
                # Don't consume the body when streaming.
                "" if stream else http_res.text,
            )

            if utils.match_status_codes(error_status_codes, http_res.status_code):
                # Error status: hooks may raise, replace the response, or do
                # neither — in which case a generic SDKError is raised.
                result, err = hooks.after_error(
                    AfterErrorContext(hook_ctx), http_res, None
                )
                if err is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise err
                if result is not None:
                    http_res = result
                else:
                    logger.debug("Raising unexpected SDK error")
                    raise errors.SDKError("Unexpected error occurred", http_res)

            return http_res

        if retry_config is not None:
            http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1]))
        else:
            http_res = do()

        # after_success hooks run only for responses not matched as errors.
        if not utils.match_status_codes(error_status_codes, http_res.status_code):
            http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res)

        return http_res

    async def do_request_async(
        self,
        hook_ctx,
        request,
        error_status_codes,
        stream=False,
        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
    ) -> httpx.Response:
        """Async counterpart of :meth:`do_request` using the configured async client.

        The hook implementations themselves are synchronous, so each hook call
        is offloaded through ``run_sync_in_thread`` to avoid blocking the event
        loop. Parameters, return value and raised errors mirror ``do_request``.
        """
        client = self.sdk_configuration.async_client
        logger = self.sdk_configuration.debug_logger

        # Hooks are read via __dict__ to bypass attribute machinery on the
        # configuration object.
        hooks = self.sdk_configuration.__dict__["_hooks"]

        async def do():
            # One attempt: before_request hooks, send, then route any transport
            # exception through after_error (which may swallow or replace it).
            http_res = None
            try:
                req = await run_sync_in_thread(
                    hooks.before_request, BeforeRequestContext(hook_ctx), request
                )

                logger.debug(
                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                    req.method,
                    req.url,
                    req.headers,
                    get_body_content(req),
                )

                if client is None:
                    raise ValueError("client is required")

                http_res = await client.send(req, stream=stream)
            except Exception as e:
                # after_error may transform the exception; re-raise only if one
                # is still present after the hook ran.
                _, e = await run_sync_in_thread(
                    hooks.after_error, AfterErrorContext(hook_ctx), None, e
                )

                if e is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise e

            if http_res is None:
                # Exception was swallowed by a hook but no response substituted.
                logger.debug("Raising no response SDK error")
                raise errors.NoResponseError("No response received")

            logger.debug(
                "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
                http_res.status_code,
                http_res.url,
                http_res.headers,
                # Don't consume the body when streaming.
                "" if stream else http_res.text,
            )

            if utils.match_status_codes(error_status_codes, http_res.status_code):
                # Error status: hooks may raise, replace the response, or do
                # neither — in which case a generic SDKError is raised.
                result, err = await run_sync_in_thread(
                    hooks.after_error, AfterErrorContext(hook_ctx), http_res, None
                )

                if err is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise err
                if result is not None:
                    http_res = result
                else:
                    logger.debug("Raising unexpected SDK error")
                    raise errors.SDKError("Unexpected error occurred", http_res)

            return http_res

        if retry_config is not None:
            http_res = await utils.retry_async(
                do, utils.Retries(retry_config[0], retry_config[1])
            )
        else:
            http_res = await do()

        # after_success hooks run only for responses not matched as errors.
        if not utils.match_status_codes(error_status_codes, http_res.status_code):
            http_res = await run_sync_in_thread(
                hooks.after_success, AfterSuccessContext(hook_ctx), http_res
            )

        return http_res
(https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.azure.client import errors, models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import OptionalNullable, UNSET +from mistralai.azure.client.utils import eventstreaming +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + def stream( + self, + *, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + 
accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + messages: Union[ + 
List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. 
+ :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def complete( + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + 
Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
+ :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + model: Optional[str] = "azureai", + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = 
None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
+ :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + 
prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + 
+ raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/errors/__init__.py b/packages/azure/src/mistralai/azure/client/errors/__init__.py new file mode 100644 index 00000000..79e2712c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .mistralazureerror import MistralAzureError +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralAzureError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py new file mode 100644 index 00000000..b4f2691e --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from dataclasses import dataclass, field +import httpx +from mistralai.azure.client.errors import MistralAzureError +from mistralai.azure.client.models import validationerror as models_validationerror +from mistralai.azure.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[models_validationerror.ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralAzureError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py b/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py new file mode 100644 index 00000000..c5bf1752 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass, field + + +@dataclass(unsafe_hash=True) +class MistralAzureError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) + + def __str__(self): + return self.message diff --git a/packages/azure/src/mistralai/azure/client/errors/no_response_error.py b/packages/azure/src/mistralai/azure/client/errors/no_response_error.py new file mode 100644 index 00000000..1deab64b --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/no_response_error.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + object.__setattr__(self, "message", message) + super().__init__(message) + + def __str__(self): + return self.message diff --git a/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py new file mode 100644 index 00000000..02397334 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.azure.client.errors import MistralAzureError + + +@dataclass(unsafe_hash=True) +class ResponseValidationError(MistralAzureError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/packages/azure/src/mistralai/azure/client/errors/sdkerror.py b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py new file mode 100644 index 00000000..c4f3616c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.azure.client.errors import MistralAzureError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralAzureError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + + if message: + message += ": " + message += f"Status {raw_response.status_code}" + + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" + + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" + + message += f". Body: {body_display}" + message = message.strip() + + super().__init__(message, raw_response, body) diff --git a/packages/azure/src/mistralai/azure/client/httpclient.py b/packages/azure/src/mistralai/azure/client/httpclient.py new file mode 100644 index 00000000..89560b56 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/httpclient.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +# pyright: reportReturnType = false +import asyncio +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + def close(self) -> None: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: 
Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + async def aclose(self) -> None: + pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + try: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) + except RuntimeError: + try: + asyncio.run(async_client.aclose()) + except RuntimeError: + # best effort + pass diff --git a/packages/azure/src/mistralai/azure/client/models/__init__.py b/packages/azure/src/mistralai/azure/client/models/__init__.py new file mode 100644 index 00000000..908dda32 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/__init__.py @@ -0,0 +1,390 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk + from .deltamessage import ( + DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, + DeltaMessageTypedDict, + ) + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict + from .filechunk import FileChunk, FileChunkTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + 
Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .imagedetail import ImageDetail + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .security import Security, SecurityTypedDict + from .systemmessage import ( + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + 
ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "ContentChunk", + "ContentChunkTypedDict", + "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", + "DeltaMessageTypedDict", + "Document", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkTypedDict", + "FileChunk", + "FileChunkTypedDict", + "Format", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "ImageDetail", + 
"ImageURL", + "ImageURLChunk", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "Loc", + "LocTypedDict", + "MistralPromptMode", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", + "Prediction", + "PredictionTypedDict", + "ReferenceChunk", + "ReferenceChunkTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Security", + "SecurityTypedDict", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TableFormat", + "TextChunk", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "UnknownContentChunk", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + 
"ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", + "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "FileChunk": ".filechunk", + 
"FileChunkTypedDict": ".filechunk", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "ImageDetail": ".imagedetail", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "Document": ".ocrrequest", + "DocumentTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "Security": ".security", + "SecurityTypedDict": ".security", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": 
".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/models/assistantmessage.py b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py new file mode 100644 index 00000000..e9ae6e82 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) + + +class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + + +class AssistantMessage(BaseModel): + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + content: OptionalNullable[AssistantMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..67b5ba69 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.azure.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +ChatCompletionChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: ChatCompletionChoiceFinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: ChatCompletionChoiceFinishReason diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..edd0fdc7 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py @@ -0,0 +1,225 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.azure.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + messages: List[ChatCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: NotRequired[str] + r"""The ID of the model to use for this request.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + messages: List[ChatCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + model: Optional[str] = "azureai" + r"""The ID of the model to use for this request.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "model", + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..d41f9c6f --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.azure.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..2edfbed9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.azure.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + messages: List[ChatCompletionStreamRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: NotRequired[str] + r"""The ID of the model to use for this request.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + messages: List[ChatCompletionStreamRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + model: Optional[str] = "azureai" + r"""The ID of the model to use for this request.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "model", + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/completionchunk.py b/packages/azure/src/mistralai/azure/client/models/completionchunk.py new file mode 100644 index 00000000..0e64bbc8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/completionchunk.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/completionevent.py b/packages/azure/src/mistralai/azure/client/models/completionevent.py new file mode 100644 index 00000000..c4b27287 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/completionevent.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.azure.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..20a27140 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/contentchunk.py 
b/packages/azure/src/mistralai/azure/client/models/contentchunk.py new file mode 100644 index 00000000..17efcc7d --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/contentchunk.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from functools import partial +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], +) + + +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + +ContentChunk = Annotated[ + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), +] diff --git a/packages/azure/src/mistralai/azure/client/models/deltamessage.py b/packages/azure/src/mistralai/azure/client/models/deltamessage.py new file mode 100644 index 00000000..567e772f --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/deltamessage.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[DeltaMessageContent] = UNSET + + tool_calls: 
OptionalNullable[List[ToolCall]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py new file mode 100644 index 00000000..2dea8005 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + type: Literal["document_url"] + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + + +class DocumentURLChunk(BaseModel): + document_url: str + + type: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "document_name"]) + nullable_fields = set(["document_name"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + DocumentURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/filechunk.py b/packages/azure/src/mistralai/azure/client/models/filechunk.py new file mode 100644 index 00000000..6baa0cba --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/filechunk.py @@ -0,0 +1,46 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + type: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + FileChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/function.py b/packages/azure/src/mistralai/azure/client/models/function.py new file mode 100644 index 00000000..055d3657 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/function.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/functioncall.py b/packages/azure/src/mistralai/azure/client/models/functioncall.py new file mode 100644 index 00000000..d476792c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/functioncall.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/packages/azure/src/mistralai/azure/client/models/functionname.py b/packages/azure/src/mistralai/azure/client/models/functionname.py new file mode 100644 index 00000000..839e0d55 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/packages/azure/src/mistralai/azure/client/models/imagedetail.py b/packages/azure/src/mistralai/azure/client/models/imagedetail.py new file mode 100644 index 00000000..2d074cee --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/azure/src/mistralai/azure/client/models/imageurl.py b/packages/azure/src/mistralai/azure/client/models/imageurl.py new file mode 100644 index 00000000..bcb4fe43 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imageurl.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imagedetail import ImageDetail +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[ImageDetail]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[ImageDetail] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py new file mode 100644 index 00000000..7213c498 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py @@ -0,0 +1,61 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + type: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/jsonschema.py b/packages/azure/src/mistralai/azure/client/models/jsonschema.py new file mode 100644 index 00000000..99f2fb89 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/jsonschema.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py b/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py new file mode 100644 index 00000000..26e7adbd --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py new file mode 100644 index 00000000..a23515b3 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of 
top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py b/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py new file mode 100644 index 00000000..12858da9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py new file mode 100644 index 00000000..434c8988 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + 
r"""List of all hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrrequest.py b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py new file mode 100644 index 00000000..a2cd3415 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py @@ -0,0 +1,145 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentTypedDict = TypeAliasType( + "DocumentTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], +) +r"""Document to run OCR on""" + + +Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) +r"""Document to run OCR on""" + + +TableFormat = Literal[ + "markdown", + "html", +] + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: Document + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" + + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrresponse.py b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py new file mode 100644 index 00000000..3dc09fd7 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py new file mode 100644 index 00000000..f1de5428 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py @@ -0,0 +1,43 @@ +"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UnrecognizedStr +import pydantic +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict + + +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, +] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py new file mode 100644 index 00000000..f63315d2 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/prediction.py b/packages/azure/src/mistralai/azure/client/models/prediction.py new file mode 100644 index 00000000..1fa1d782 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/prediction.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/referencechunk.py b/packages/azure/src/mistralai/azure/client/models/referencechunk.py new file mode 100644 index 00000000..f7af9bf9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/referencechunk.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/responseformat.py b/packages/azure/src/mistralai/azure/client/models/responseformat.py new file mode 100644 index 00000000..20fd2b86 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/responseformat.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .responseformats import ResponseFormats +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: NotRequired[ResponseFormats] + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] + + +class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: Optional[ResponseFormats] = None + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/responseformats.py b/packages/azure/src/mistralai/azure/client/models/responseformats.py new file mode 100644 index 00000000..cbf83ce7 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/responseformats.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/azure/src/mistralai/azure/client/models/security.py b/packages/azure/src/mistralai/azure/client/models/security.py new file mode 100644 index 00000000..9b83ba98 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/security.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import FieldMetadata, SecurityMetadata +from typing_extensions import Annotated, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessage.py b/packages/azure/src/mistralai/azure/client/models/systemmessage.py new file mode 100644 index 00000000..d4bd0044 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/systemmessage.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], +) + + +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] +) + + +class SystemMessageTypedDict(TypedDict): + content: SystemMessageContentTypedDict + role: Literal["system"] + + +class SystemMessage(BaseModel): + content: SystemMessageContent + + role: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" + + +try: + SystemMessage.model_rebuild() +except 
NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..8de71c90 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from pydantic import Field +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[TextChunk, ThinkChunk], Field(discriminator="type") +] diff --git a/packages/azure/src/mistralai/azure/client/models/textchunk.py b/packages/azure/src/mistralai/azure/client/models/textchunk.py new file mode 100644 index 00000000..92951485 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/textchunk.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + type: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/thinkchunk.py b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py new file mode 100644 index 00000000..4e881aad --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/tool.py b/packages/azure/src/mistralai/azure/client/models/tool.py new file mode 100644 index 00000000..87329bdb --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/tool.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from .tooltypes import ToolTypes +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[ToolTypes] + + +class Tool(BaseModel): + function: Function + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolcall.py b/packages/azure/src/mistralai/azure/client/models/toolcall.py new file mode 100644 index 00000000..ada1ea65 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolcall.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from .tooltypes import ToolTypes +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + type: NotRequired[ToolTypes] + index: NotRequired[int] + + +class ToolCall(BaseModel): + function: FunctionCall + + id: Optional[str] = "null" + + type: Optional[ToolTypes] = None + + index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolchoice.py b/packages/azure/src/mistralai/azure/client/models/toolchoice.py new file mode 100644 index 00000000..ddb9e141 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolchoice.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py b/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py new file mode 100644 index 00000000..01f6f677 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/azure/src/mistralai/azure/client/models/toolmessage.py b/packages/azure/src/mistralai/azure/client/models/toolmessage.py new file mode 100644 index 00000000..670210de --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/toolmessage.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) + + +class ToolMessageTypedDict(TypedDict): + content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + + +class ToolMessage(BaseModel): + content: Nullable[ToolMessageContent] + + role: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) + serialized = 
handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/tooltypes.py b/packages/azure/src/mistralai/azure/client/models/tooltypes.py new file mode 100644 index 00000000..1cce7446 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/azure/src/mistralai/azure/client/models/usageinfo.py b/packages/azure/src/mistralai/azure/client/models/usageinfo.py new file mode 100644 index 00000000..0f04c87c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/usageinfo.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/usermessage.py 
b/packages/azure/src/mistralai/azure/client/models/usermessage.py new file mode 100644 index 00000000..549b01ca --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/usermessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.azure.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: Literal["user"] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/validationerror.py b/packages/azure/src/mistralai/azure/client/models/validationerror.py new file mode 100644 index 00000000..817ecf7a --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/validationerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from typing import List, Union +from typing_extensions import TypeAliasType, TypedDict + + +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) + + +Loc = TypeAliasType("Loc", Union[str, int]) + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + + msg: str + + type: str diff --git a/packages/azure/src/mistralai/azure/client/ocr.py b/packages/azure/src/mistralai/azure/client/ocr.py new file mode 100644 index 00000000..b9270f6a --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/ocr.py @@ -0,0 +1,276 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.azure.client import errors, models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import Nullable, OptionalNullable, UNSET +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + def process( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: 
OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request( + method="POST", + path="/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + 
retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or 
list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request_async( + method="POST", + path="/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", 
"5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_azure/src/mistralai_azure/py.typed b/packages/azure/src/mistralai/azure/client/py.typed similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/py.typed rename to packages/azure/src/mistralai/azure/client/py.typed diff --git a/packages/azure/src/mistralai/azure/client/sdk.py b/packages/azure/src/mistralai/azure/client/sdk.py new file mode 100644 index 00000000..985cb9a8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/sdk.py @@ -0,0 +1,217 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import httpx +import importlib +import logging +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import SDKHooks +from mistralai.azure.client.types import OptionalNullable, UNSET +import sys +from typing import Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import warnings +import weakref + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from mistralai.azure.client.chat import Chat + from mistralai.azure.client.ocr import Ocr + + +class MistralAzure(BaseSDK): + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + """ + + chat: "Chat" + r"""Chat Completion API.""" + ocr: "Ocr" + _sub_sdk_map = { + "chat": ("mistralai.azure.client.chat", "Chat"), + "ocr": ("mistralai.azure.client.ocr", "Ocr"), + } + + def __init__( + self, + api_key: Union[str, Callable[[], str]], + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + api_version: str = "2024-05-01-preview", + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + :param api_version: Azure API version to use (injected as query param) + """ + client_supplied = True + if client is None: + client = httpx.Client( + follow_redirects=True, + params={"api-version": api_version}, + ) + client_supplied = False + elif api_version != "2024-05-01-preview": + warnings.warn( + "api_version is ignored when a custom client is provided. " + "Set the api-version query parameter on your httpx.Client directly.", + stacklevel=2, + ) + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient( + follow_redirects=True, + params={"api-version": api_version}, + ) + async_client_supplied = False + elif api_version != "2024-05-01-preview": + warnings.warn( + "api_version is ignored when a custom async_client is provided. " + "Set the api-version query parameter on your httpx.AsyncClient directly.", + stacklevel=2, + ) + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ + security: Union[models.Security, Callable[[], models.Security]] + if callable(api_key): + + def get_security() -> models.Security: + return models.Security(api_key=api_key()) + + security = get_security + else: + security = models.Security(api_key=api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + self.sdk_configuration.__dict__["_hooks"] = hooks + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + last_exc: Optional[Exception] = None + for attempt in range(retries): + try: + return importlib.import_module(modname) + except (KeyError, ImportError, ModuleNotFoundError) as e: + last_exc = e + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise ImportError( + f"Failed to import module '{modname}' after {retries} attempts" + ) from last_exc + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) 
+ instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, _exc_type, _exc_val, _exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, _exc_type, _exc_val, _exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/packages/azure/src/mistralai/azure/client/sdkconfiguration.py b/packages/azure/src/mistralai/azure/client/sdkconfiguration.py new file mode 100644 index 00000000..919225f9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/sdkconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.azure.client import models +from mistralai.azure.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/packages/mistralai_azure/src/mistralai_azure/types/__init__.py b/packages/azure/src/mistralai/azure/client/types/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/types/__init__.py rename to packages/azure/src/mistralai/azure/client/types/__init__.py diff --git 
a/packages/azure/src/mistralai/azure/client/types/basemodel.py b/packages/azure/src/mistralai/azure/client/types/basemodel.py new file mode 100644 index 00000000..a9a640a1 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/types/basemodel.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return 
core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/packages/azure/src/mistralai/azure/client/utils/__init__.py b/packages/azure/src/mistralai/azure/client/utils/__init__.py new file mode 100644 index 00000000..b488c2df --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/__init__.py @@ -0,0 +1,178 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .unions import parse_open_union + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_default_logger", + "get_discriminator", + "parse_datetime", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "HeaderMetadata", + "Logger", + 
"marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "OpenEnumMeta", + "parse_open_union", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_const", + "validate_float", + "validate_int", + "cast_partial", +] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", 
+ "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "cast_partial": ".values", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/utils/annotations.py b/packages/azure/src/mistralai/azure/client/utils/annotations.py new file mode 100644 index 00000000..12e0aa4f --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/annotations.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from enum import Enum +from typing import Any, Optional + + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + """ + Recursively search for the discriminator attribute in a model. + + Args: + model (Any): The model to search within. + fieldname (str): The name of the field to search for. + key (str): The key to search for in dictionaries. + + Returns: + str: The name of the discriminator attribute. + + Raises: + ValueError: If the discriminator attribute is not found. 
+ """ + upper_fieldname = fieldname.upper() + + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" + + if isinstance(field, dict): + if key in field: + return f"{field[key]}" + + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + return None + + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None + + if isinstance(model, list): + for field in model: + discriminator = search_nested_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = search_nested_discriminator(model) + if discriminator is not None: + return discriminator + + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/utils/datetimes.py b/packages/azure/src/mistralai/azure/client/utils/datetimes.py similarity index 100% rename from src/mistralai/utils/datetimes.py rename to packages/azure/src/mistralai/azure/client/utils/datetimes.py diff --git 
a/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py b/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py new file mode 100644 index 00000000..673edf82 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). + dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. 
+ """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/packages/azure/src/mistralai/azure/client/utils/enums.py b/packages/azure/src/mistralai/azure/client/utils/enums.py new file mode 100644 index 00000000..3324e1bc --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/enums.py @@ -0,0 +1,134 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum +import sys +from typing import Any + +from pydantic_core import core_schema + + +class OpenEnumMeta(enum.EnumMeta): + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. 
Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. 
+ def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py new file mode 100644 index 00000000..f2052fc2 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py @@ -0,0 +1,280 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import re +import json +from dataclasses import dataclass, asdict +from typing import ( + Any, + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) +import httpx + +T = TypeVar("T") + + +class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] + response: httpx.Response + generator: Generator[T, None, None] + _closed: bool + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref + self._closed = False + + def __iter__(self): + return self + + def __next__(self): + if self._closed: + raise StopIteration + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True + self.response.close() + + +class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
+ client_ref: Optional[object] + response: httpx.Response + generator: AsyncGenerator[T, None] + _closed: bool + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref + self._closed = False + + def __aiter__(self): + return self + + async def __anext__(self): + if self._closed: + raise StopAsyncIteration + return await self.generator.__anext__() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True + await self.response.aclose() + + +@dataclass +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Any = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", + b"\r\r", + b"\n\r", + b"\n\n", +] + +UTF8_BOM = b"\xef\xbb\xbf" + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + event_id: Optional[str] = None + async for chunk in response.aiter_bytes(): + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + if discard: + await response.aclose() + return + + if position > 0: + buffer = buffer[position:] + 
position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + event_id: Optional[str] = None + for chunk in response.iter_bytes(): + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + if discard: + response.close() + return + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def _parse_event( + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim == 0: + continue + + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == 
"id": + publish = True + if "\x00" not in value: + event_id = value + elif field == "retry": + if value.isdigit(): + event.retry = int(value) + publish = True + + event.id = event_id + + if sentinel and data == f"{sentinel}\n": + return None, True, event_id + + if data: + data = data[:-1] + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data + + out = None + if publish: + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/packages/azure/src/mistralai/azure/client/utils/forms.py b/packages/azure/src/mistralai/azure/client/utils/forms.py new file mode 100644 index 00000000..1e550bd5 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/forms.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _is_set, _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if not _is_set(obj): + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = 
file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: + form: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if not _is_set(val): + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias else name + + if field_metadata.file: + if isinstance(val, List): + # Handle array of files + array_field_name = f_name + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties( + file_obj + ) + + if content_type is not None: + files.append( + (array_field_name, (file_name, content, content_type)) + ) + else: + files.append((array_field_name, (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) + + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) + else: + 
files.append((f_name, (file_name, content))) + elif field_metadata.json: + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if not _is_set(value): + continue + values.append(_val_to_string(value)) + + array_field_name = f_name + form[array_field_name] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if not _is_set(val): + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + if _is_set(value): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/headers.py b/packages/azure/src/mistralai/azure/client/utils/headers.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/headers.py rename to packages/azure/src/mistralai/azure/client/utils/headers.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/azure/src/mistralai/azure/client/utils/logger.py similarity index 100% rename from 
packages/mistralai_azure/src/mistralai_azure/utils/logger.py rename to packages/azure/src/mistralai/azure/client/utils/logger.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py b/packages/azure/src/mistralai/azure/client/utils/metadata.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/metadata.py rename to packages/azure/src/mistralai/azure/client/utils/metadata.py diff --git a/packages/azure/src/mistralai/azure/client/utils/queryparams.py b/packages/azure/src/mistralai/azure/client/utils/queryparams.py new file mode 100644 index 00000000..c04e0db8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/queryparams.py @@ -0,0 +1,217 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) + if _is_set(gbls): + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, 
FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if _is_set(query_params) else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj): + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, 
params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj) or not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if not _is_set(obj_param_metadata): + continue + + obj_val = getattr(obj, name) + if not _is_set(obj_val): + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for key, val in value.items(): + if not _is_set(val): + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for val in value: + if not _is_set(val): + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def 
_populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/packages/azure/src/mistralai/azure/client/utils/requestbodies.py b/packages/azure/src/mistralai/azure/client/utils/requestbodies.py new file mode 100644 index 00000000..1de32b6d --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: Optional[str] = None + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"^multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + 
serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/packages/azure/src/mistralai/azure/client/utils/retries.py b/packages/azure/src/mistralai/azure/client/utils/retries.py new file mode 100644 index 00000000..af07d4e9 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/retries.py @@ -0,0 +1,271 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: 
List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except 
TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/packages/azure/src/mistralai/azure/client/utils/security.py b/packages/azure/src/mistralai/azure/client/utils/security.py new file mode 100644 index 00000000..17996bd5 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/security.py @@ -0,0 +1,176 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: 
Dict[str, str],
+    query_params: Dict[str, List[str]],
+    scheme_metadata: SecurityMetadata,
+    field_name: str,
+    scheme: Any,
+):
+    scheme_type = scheme_metadata.scheme_type
+    sub_type = scheme_metadata.sub_type
+
+    if isinstance(scheme, BaseModel):
+        if scheme_type == "http":
+            if sub_type == "basic":
+                _parse_basic_auth_scheme(headers, scheme)
+                return
+            if sub_type == "custom":
+                return
+
+        scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields
+        for name in scheme_fields:
+            scheme_field = scheme_fields[name]
+
+            metadata = find_field_metadata(scheme_field, SecurityMetadata)
+            if metadata is None or metadata.field_name is None:
+                continue
+
+            value = getattr(scheme, name)
+
+            _parse_security_scheme_value(
+                headers, query_params, scheme_metadata, metadata, name, value
+            )
+    else:
+        _parse_security_scheme_value(
+            headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme
+        )
+
+
+def _parse_security_scheme_value(
+    headers: Dict[str, str],
+    query_params: Dict[str, List[str]],
+    scheme_metadata: SecurityMetadata,
+    security_metadata: SecurityMetadata,
+    field_name: str,
+    value: Any,
+):
+    scheme_type = scheme_metadata.scheme_type
+    sub_type = scheme_metadata.sub_type
+
+    header_name = security_metadata.get_field_name(field_name)
+
+    if scheme_type == "apiKey":
+        if sub_type == "header":
+            headers[header_name] = value
+        elif sub_type == "query":
+            query_params[header_name] = [value]
+        else:
+            raise ValueError(f"sub type {sub_type} not supported")
+    elif scheme_type == "openIdConnect":
+        headers[header_name] = _apply_bearer(value)
+    elif scheme_type == "oauth2":
+        if sub_type != "client_credentials":
+            headers[header_name] = _apply_bearer(value)
+    elif scheme_type == "http":
+        if sub_type == "bearer":
+            headers[header_name] = _apply_bearer(value)
+        elif sub_type == "basic":
+            headers[header_name] = value
+        elif sub_type == "custom":
+            return
+        else:
+            raise ValueError(f"sub type {sub_type} not supported")
+    else:
+        raise ValueError(f"scheme type {scheme_type} not supported")
+
+
+def _apply_bearer(token: str) -> str:
+    return token.lower().startswith("bearer ") and token or f"Bearer {token}"
+
+
+def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any):
+    username = ""
+    password = ""
+
+    if not isinstance(scheme, BaseModel):
+        raise TypeError("basic auth scheme must be a pydantic model")
+
+    scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields
+    for name in scheme_fields:
+        scheme_field = scheme_fields[name]
+
+        metadata = find_field_metadata(scheme_field, SecurityMetadata)
+        if metadata is None or metadata.field_name is None:
+            continue
+
+        field_name = metadata.field_name
+        value = getattr(scheme, name)
+
+        if field_name == "username":
+            username = value
+        if field_name == "password":
+            password = value
+
+    data = f"{username}:{password}".encode()
+    headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}"
diff --git a/packages/azure/src/mistralai/azure/client/utils/serializers.py b/packages/azure/src/mistralai/azure/client/utils/serializers.py
new file mode 100644
index 00000000..14321eb4
--- /dev/null
+++ b/packages/azure/src/mistralai/azure/client/utils/serializers.py
@@ -0,0 +1,229 @@
+"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if 
not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/azure/src/mistralai/azure/client/utils/unions.py b/packages/azure/src/mistralai/azure/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..fe0c9b8e --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.azure.client import errors + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... 
+ + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise errors.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/url.py b/packages/azure/src/mistralai/azure/client/utils/url.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/url.py rename to packages/azure/src/mistralai/azure/client/utils/url.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/azure/src/mistralai/azure/client/utils/values.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/values.py rename to packages/azure/src/mistralai/azure/client/utils/values.py diff --git a/packages/azure/uv.lock b/packages/azure/uv.lock new file mode 100644 index 00000000..7c090c00 --- /dev/null +++ b/packages/azure/uv.lock @@ -0,0 +1,482 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = 
"sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e6/e3/c4c8d473d6780ef1853d630d581f70d655b4f8d7553c6997958c283039a2/anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94", size = 163930, upload-time = "2024-05-26T22:02:15.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/a2/10639a79341f6c019dedc95bd48a4928eed9f1d1197f4c04f546fc7ae0ff/anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7", size = 86780, upload-time = "2024-05-26T22:02:13.671Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = 
"sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/02/a95f2b11e207f68bc64d7aae9666fed2e2b3f307748d5123dffb72a1bbea/certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", size = 164065, upload-time = "2024-07-04T01:36:11.653Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90", size = 162960, upload-time = "2024-07-04T01:36:09.038Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dill" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847, upload-time = "2024-01-27T23:42:16.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252, upload-time = "2024-01-27T23:42:14.239Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883, upload-time = "2024-07-12T22:26:00.161Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453, upload-time = "2024-07-12T22:25:58.476Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/ed/f86a79a07470cb07819390452f178b3bef1d375f2ec021ecfc709fc7cf07/idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", size = 189575, upload-time = "2024-04-11T03:34:43.276Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/3e/741d8c82801c347547f8a2a06aa57dbb1992be9e948df2ea0eda2c8b79e8/idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0", size = 66836, upload-time = "2024-04-11T03:34:41.447Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" }, +] + +[[package]] +name = "isort" 
+version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mistralai-azure" +version = "2.0.0b1" +source = { editable = "." 
} +dependencies = [ + { name = "httpcore" }, + { name = "httpx" }, + { name = "pydantic" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, + { name = "pylint" }, + { name = "pyright" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, +] + +[package.metadata] +requires-dist = [ + { name = "httpcore", specifier = ">=1.0.9" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.11.2" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = 
"https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = 
"https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433, upload-time = "2023-02-04T12:11:27.157Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "packaging" +version = "24.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788, upload-time = "2024-06-09T23:19:24.956Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985, upload-time = "2024-06-09T23:19:21.909Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz", hash = 
"sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", size = 20916, upload-time = "2024-05-15T03:18:23.372Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", size = 18146, upload-time = "2024-05-15T03:18:21.209Z" }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" }, + { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" }, + { url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" }, + 
{ url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" }, + { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" }, + { url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" }, + { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" }, + { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" }, + { url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", 
hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" }, + { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, 
upload-time = "2025-04-23T18:31:11.7Z" }, + { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" }, + { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" }, + { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" }, + { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" }, + { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" }, + { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" }, + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = 
"https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, + { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = 
"sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, + { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" }, + { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" }, + { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" }, + { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" }, + { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" }, + { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" }, + { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" }, + { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" }, + { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" }, + { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" }, + { url = 
"https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.408" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = 
"2026-01-08T08:07:38.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" }, +] + +[[package]] +name = "pytest" +version = "8.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b4/8c/9862305bdcd6020bc7b45b1b5e7397a6caf1a33d3025b9a003b39075ffb2/pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce", size = 1439314, upload-time = "2024-07-25T10:40:00.159Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/f9/cf155cf32ca7d6fa3601bc4c5dd19086af4b320b706919d48a4c79081cf9/pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5", size = 341802, upload-time = "2024-07-25T10:39:57.834Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, 
upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "tomli" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f", size = 15164, upload-time = "2022-02-08T10:54:04.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", size = 12757, 
upload-time = "2022-02-08T10:54:02.017Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4b/34/f5f4fbc6b329c948a90468dd423aaa3c3bfc1e07d5a76deec269110f2f6e/tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72", size = 191792, upload-time = "2024-07-10T09:25:56.381Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/7c/b753bf603852cab0a660da6e81f4ea5d2ca0f0b2b4870766d7aa9bceb7a2/tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264", size = 37770, upload-time = "2024-07-10T09:25:54.676Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222, upload-time = "2025-02-25T17:27:59.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125, upload-time = "2025-02-25T17:27:57.754Z" }, +] diff --git a/packages/gcp/.genignore b/packages/gcp/.genignore new file mode 100644 index 00000000..9a119b75 --- /dev/null +++ b/packages/gcp/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/gcp/client/sdk.py +src/mistralai/gcp/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/mistralai_gcp/.gitattributes b/packages/gcp/.gitattributes similarity index 100% rename from packages/mistralai_gcp/.gitattributes rename to packages/gcp/.gitattributes diff --git a/packages/gcp/.gitignore b/packages/gcp/.gitignore new file mode 100644 index 00000000..b386de74 --- /dev/null +++ b/packages/gcp/.gitignore @@ -0,0 +1,15 @@ +.env +.env.local +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ +.speakeasy/reports +README-PYPI.md +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock new file mode 100644 index 00000000..6e33773d --- /dev/null +++ b/packages/gcp/.speakeasy/gen.lock @@ -0,0 +1,803 @@ +lockVersion: 2.0.0 +id: ec60f2d8-7869-45c1-918e-773d41a8cf74 +management: + docChecksum: bc4a0ba9c38418d84a6a8a76b503977b + docVersion: 1.0.0 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 
2.0.0b1 + configChecksum: 9cea6a311ff15502c47b0ef87e9846a2 + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/gcp + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp + published: true +persistentEdits: + generation_id: e503bb37-7bdd-4ebf-9bed-a8f754c99f8a + pristine_commit_hash: f14b1b1288437b7fc0ba666a384614a225385259 + pristine_tree_hash: 67e6d0a84ae20666a636dcc8ad174647a96b105f +features: + python: + additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 + constsAndDefaults: 1.0.7 + core: 6.0.12 + defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 + envVarSecurityUsage: 0.3.2 + examples: 3.0.2 + flatRequests: 1.0.1 + globalSecurity: 3.0.5 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.2.0 + includes: 3.0.0 + methodArguments: 1.0.2 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.1.4 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + 
last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:6374e05aeb66d48137d657acaa89527df2db35c6 + pristine_git_object: 8dbd4a82ad1d7725b9a6ce56daea208ca01b9210 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:e23cf88a5a9b0c99e68d06a8450b8bfb9aee33a2 + pristine_git_object: db76b6c81a71607f94c212a542fe30e082053a90 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: 
sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + 
pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:30b72826963e22cadf76ac0b7604288dbc4fb943 + pristine_git_object: a84dac32b99390e3fd0559714ca43795742192c6 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + 
last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + 
docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 
84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 + scripts/prepare_readme.py: + id: e0c5957a6035 + last_write_checksum: sha1:eb988bc0e00ed4bb14e9a3572845af14f06c9b42 + pristine_git_object: ae27b555c05c3c9f35d84e8bbe6a7c9f80cf94b2 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/gcp/client/__init__.py: + id: 4f63decd432e + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 + src/mistralai/gcp/client/_hooks/__init__.py: + id: adcb191838d1 + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 
2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai/gcp/client/_hooks/sdkhooks.py: + id: 7e23394c3f65 + last_write_checksum: sha1:4a03a16da35168f25ed0cccfdb0d4c4d86bbe242 + pristine_git_object: 2af4deeda8055f4c57c0c7f00a7b79033435cf34 + src/mistralai/gcp/client/_hooks/types.py: + id: 4f37fd18bfd9 + last_write_checksum: sha1:2b295cc28d5fa2c79495510c8b97a1ea60f993e0 + pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 + src/mistralai/gcp/client/_version.py: + id: f87319e32c7b + last_write_checksum: sha1:0d99fadc73b957112022a95eabeb0e3a98d14ff4 + pristine_git_object: 36e44a5e6067e8bd197b38cc238686f660c77244 + src/mistralai/gcp/client/basesdk.py: + id: 4d594572857b + last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 + pristine_git_object: b3edcb0aca1882d0cbe4d499cfba9cb5464c5b58 + src/mistralai/gcp/client/chat.py: + id: 4c41f05f786e + last_write_checksum: sha1:60b2697e2ecfb62eebed910007e62ab1df565eec + pristine_git_object: 925d69eda2fdac458045cc12327ca72997e07600 + src/mistralai/gcp/client/errors/__init__.py: + id: c51c8ed21629 + last_write_checksum: sha1:29f08ad600a712ff572843a250839ef92efac19b + pristine_git_object: 00c8ee0031486b5416bb6745397c463e1a5dbba6 + src/mistralai/gcp/client/errors/httpvalidationerror.py: + id: b0e25f1c36bd + last_write_checksum: sha1:c863914ed6704ee6c3ad99a77d8b1e742de069d0 + pristine_git_object: 598068197b9ed7e7756de01325f7967a719e46ea + src/mistralai/gcp/client/errors/mistralgcperror.py: + id: 9a9cad8f5d36 + last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e + pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + src/mistralai/gcp/client/errors/no_response_error.py: + id: 2d3e5fe56122 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/gcp/client/errors/responsevalidationerror.py: + id: 98f7bac284be + last_write_checksum: sha1:1b835d2ce8754b22d5fa269077d7a2eec11d7f29 + 
pristine_git_object: e8bd83c19b0629bb0ddf7a240e9b8371cb33fff3 + src/mistralai/gcp/client/errors/sdkerror.py: + id: c53aee73c8e1 + last_write_checksum: sha1:080933e9f354b675988a132813f23e55f9e5db74 + pristine_git_object: 6980924626fa5fbf67fb62a30fd23d5883dbe650 + src/mistralai/gcp/client/fim.py: + id: 13d2d208e0ef + last_write_checksum: sha1:1027165887446ce0764ad542ca52f61b460c71b8 + pristine_git_object: 4202102ae5218784a10ee93ada5a0643d23a1d0c + src/mistralai/gcp/client/httpclient.py: + id: a53dd7be6a4c + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai/gcp/client/models/__init__.py: + id: d9e976d01972 + last_write_checksum: sha1:97ddfc7f70abd5e1a0b36be6dce209b69e9d5c73 + pristine_git_object: 575f64040c90152e74954b749ea89bce5a07e02e + src/mistralai/gcp/client/models/assistantmessage.py: + id: d39c4bdd289e + last_write_checksum: sha1:c813783bcbeec4e40f12e007d1dde4aed8ec71cf + pristine_git_object: 702ac4708abb95fc18d138500b8353715c2dbc98 + src/mistralai/gcp/client/models/chatcompletionchoice.py: + id: 8e65b56f3e6d + last_write_checksum: sha1:e6d1382e9f880b866130d900fd866997aaf80e45 + pristine_git_object: ae5a2fbf38afbd86233dcaa8aa1c8441f5ed9eba + src/mistralai/gcp/client/models/chatcompletionrequest.py: + id: 4694a31c0003 + last_write_checksum: sha1:80fcbbcde773c22c93cf2db63beef2cfe3777497 + pristine_git_object: 8229c5bb13ded84039f3d8ddb95ac0a9c184e1bd + src/mistralai/gcp/client/models/chatcompletionresponse.py: + id: dd9e4796fca9 + last_write_checksum: sha1:76d7257583389ff5021e320a8f9a45a6deb07c7c + pristine_git_object: 317c4d84e378c14294d58c5aefd8c55ffe28754a + src/mistralai/gcp/client/models/chatcompletionstreamrequest.py: + id: 7294862af8ea + last_write_checksum: sha1:899210f881bdbe0a0d94e29fe7044fabbccc578c + pristine_git_object: 3c228d2e7edf08c36f310e190a8dedc7b4958459 + src/mistralai/gcp/client/models/completionchunk.py: + id: 6b9ed8c30877 + last_write_checksum: 
sha1:f1f091e94e3c1c1aefd3c3bb60c8de8236ab0ead + pristine_git_object: a0b1ae2fa3109a2c2b76bbc483b691d88dc9a15c + src/mistralai/gcp/client/models/completionevent.py: + id: 3f55c4b8fc75 + last_write_checksum: sha1:66665d921fd27df6ef0efce996a5446e49b989d8 + pristine_git_object: bb1550093ce9adcb9bcd0548b69796e82f4f260b + src/mistralai/gcp/client/models/completionresponsestreamchoice.py: + id: ad9b98ca7e1c + last_write_checksum: sha1:c4f9d733461bdb9a0d6c96e82212de7dddc04ffe + pristine_git_object: e58d4c88009ed3696d2a3a57f3796d8fb067019d + src/mistralai/gcp/client/models/contentchunk.py: + id: 8714d3bf2698 + last_write_checksum: sha1:acab1b53b1d324544c6aa6c4126a3fb5265278d2 + pristine_git_object: 18d481505e17d2125e380d796b0c406b0e66d601 + src/mistralai/gcp/client/models/deltamessage.py: + id: 404fc85f1a4c + last_write_checksum: sha1:982c2d15a570c7f4d5e1c3b012db46ea3bac609b + pristine_git_object: 63e6a7f3e50c138f235f5a36277aa8668f85cef1 + src/mistralai/gcp/client/models/fimcompletionrequest.py: + id: 5b79e2595d31 + last_write_checksum: sha1:80a2e3d5e10c240869cd96c41936d714cf8bf801 + pristine_git_object: e460f76c59315c22c75194936f1f3b232331f83c + src/mistralai/gcp/client/models/fimcompletionresponse.py: + id: 402f602d29b8 + last_write_checksum: sha1:cfe26848c7b14d6e374b7944d7ad44df822990b0 + pristine_git_object: 5b80da3f03e4e99dfca971a53af1cf6472c889bb + src/mistralai/gcp/client/models/fimcompletionstreamrequest.py: + id: 31190cf25070 + last_write_checksum: sha1:a95ab8c20b2fdff48102f08258a556af9f382ffa + pristine_git_object: fffc305499e578f77e42fb7992b59e933ae0ae7c + src/mistralai/gcp/client/models/function.py: + id: 2285a899b32e + last_write_checksum: sha1:6439f7f781174ae56b2b02ccbb4d02b08d8d5a03 + pristine_git_object: 439e831355444e0f9e82d23636651201f0db4bfc + src/mistralai/gcp/client/models/functioncall.py: + id: 17bb51f08e5f + last_write_checksum: sha1:b5fe2f061ea5f47057ee50011babc80de27e0ee6 + pristine_git_object: 0f1b24251ce728b3c2a0fb9e9ca94f90a9c3b7be + 
src/mistralai/gcp/client/models/functionname.py: + id: 313a6001145f + last_write_checksum: sha1:fe1eefaed314efa788bd15beb63bf6b81abb307e + pristine_git_object: 585b9e39762e49356823e211ad86f701bca389b8 + src/mistralai/gcp/client/models/imagedetail.py: + id: a28b2f3e2cb5 + last_write_checksum: sha1:a4874529961952019eaa86a2fa0989626f537a4c + pristine_git_object: 68ed76080716eb1424b13f182479f57e51a4fabf + src/mistralai/gcp/client/models/imageurl.py: + id: 4e330f3eae74 + last_write_checksum: sha1:6c0bee7d7c765fb2611131c7d270041671b428b8 + pristine_git_object: 903d0a1a45eeb7c5e8cde80f624b6e039de1f4cc + src/mistralai/gcp/client/models/imageurlchunk.py: + id: e68a4a393e9b + last_write_checksum: sha1:eae1d0e69a90b2f7513492e4cd0ed68d647f0b5d + pristine_git_object: 4bec0eec882c1eeee8a80f663ff7d686ca677ea0 + src/mistralai/gcp/client/models/jsonschema.py: + id: 39c6e7d412a0 + last_write_checksum: sha1:19b34a5e3f5c00d1a1b96f91a6e02f5ad12240c7 + pristine_git_object: 684ac09f0460bef1f26bf0030b79bbc7141ab99b + src/mistralai/gcp/client/models/mistralpromptmode.py: + id: 8be4a4a683e4 + last_write_checksum: sha1:c958567e95490abf3941fde69be69733e8afb90e + pristine_git_object: c765e4f1a0b86735255771231377f13d62f3d7a6 + src/mistralai/gcp/client/models/prediction.py: + id: 7a5463285bc8 + last_write_checksum: sha1:67c4a9b06d3e98552409a26960e0afd64f829b53 + pristine_git_object: 2e325289fd6c2a987ad270fd808f7b9a3f423440 + src/mistralai/gcp/client/models/referencechunk.py: + id: 523e477f8725 + last_write_checksum: sha1:aade1dc05c2a2672630eb17626e4f49367d6bfe6 + pristine_git_object: 261c4755641093a38f97b17dce3a387623e69ead + src/mistralai/gcp/client/models/responseformat.py: + id: 06774bb65b42 + last_write_checksum: sha1:7e64de46ef34718003cf0d198868a193f2122178 + pristine_git_object: f3aa9930e0f8a009dac628300d66c6209a538031 + src/mistralai/gcp/client/models/responseformats.py: + id: 18112ad0f6db + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: 
cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai/gcp/client/models/security.py: + id: 7e13bda8273b + last_write_checksum: sha1:7086e929823d4eefe80cc279b605adfc8bbb08aa + pristine_git_object: 10a469b54d5e03873fb7d7d98627f2376c93d484 + src/mistralai/gcp/client/models/systemmessage.py: + id: 6537664d2d1b + last_write_checksum: sha1:779cb07cfd63ebe9eec496177cf1a8f5c077e417 + pristine_git_object: b3795c4bf4e97853979e0042cf4bd151d60ef974 + src/mistralai/gcp/client/models/systemmessagecontentchunks.py: + id: e120a6469c89 + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b + src/mistralai/gcp/client/models/textchunk.py: + id: a134f120d4dc + last_write_checksum: sha1:1ccc7d232136d6278d670542d192f36f46862df1 + pristine_git_object: 690322725c0f852a005d08c5b722c41709868b22 + src/mistralai/gcp/client/models/thinkchunk.py: + id: 59a1d1ef2020 + last_write_checksum: sha1:066eeb10de301264e601a9ec64d21e1cc13b0c20 + pristine_git_object: 33ec83949499d99a28c55bb20429ab948bb5b1e8 + src/mistralai/gcp/client/models/tool.py: + id: 4b27d45e56ad + last_write_checksum: sha1:cb0d879a55218fd7753bdd005be8a155982feb8f + pristine_git_object: 670aa81f8767e7c079105cf5995225168b4d6eb6 + src/mistralai/gcp/client/models/toolcall.py: + id: e6c25869a579 + last_write_checksum: sha1:f88e69a8e352025ca4b6897f6c16e1f7e4cd7264 + pristine_git_object: 3ea8e283c8f695bcc1fbc734b0074d37c2efeac8 + src/mistralai/gcp/client/models/toolchoice.py: + id: cb13a9f64c92 + last_write_checksum: sha1:71be72b1aae19aef1f8a461c89b71ad6daa009b7 + pristine_git_object: 6e795fd72792f740c8aa5b4da7d1f516018f2c2e + src/mistralai/gcp/client/models/toolchoiceenum.py: + id: d62e9c92d93c + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai/gcp/client/models/toolmessage.py: + id: b3774786c2e9 + last_write_checksum: sha1:3d414da8132467d1472ebe485802ffc78eb6f7e4 
+ pristine_git_object: ce160391f37ce3568daf2877f8dc1aa0f3694821 + src/mistralai/gcp/client/models/tooltypes.py: + id: 5926c64f5229 + last_write_checksum: sha1:ffd576511eed9f823c3d67df9fc5574d8d53c54b + pristine_git_object: fd1aa13d7b8c5d9bdb0922e04b8bd653ff843f60 + src/mistralai/gcp/client/models/usageinfo.py: + id: 3aab1af66cff + last_write_checksum: sha1:c0c949ac48ed35efe1e8fbf820b8e390edd9c3ce + pristine_git_object: cb6feb6e8d173d39b828d8f5b38af75173b4f7f2 + src/mistralai/gcp/client/models/usermessage.py: + id: 9cfa7260463e + last_write_checksum: sha1:780984241b84a7dfe1f6ad6eccace1204bfec8bd + pristine_git_object: e237e900421a9e65fd15aede29ade0e510b189f6 + src/mistralai/gcp/client/models/validationerror.py: + id: 6b4f4910ea9c + last_write_checksum: sha1:2792fd656f55519902f37670fb9fb3b43b4aa016 + pristine_git_object: 2d330e9acb579cc4928fa27fdd72288ce8832b8b + src/mistralai/gcp/client/py.typed: + id: 98b8ab80ab0d + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/gcp/client/sdkconfiguration.py: + id: 57be0f79ea1e + last_write_checksum: sha1:0c5905e7c6092f57c15ee4318a85c0985bcc1ccf + pristine_git_object: d56a634f688f6697ba84962381084dc2d0836ac9 + src/mistralai/gcp/client/types/__init__.py: + id: f7ef15ac2ba1 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai/gcp/client/types/basemodel.py: + id: 24babf758c19 + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai/gcp/client/utils/__init__.py: + id: a30c8ff6dcff + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 + src/mistralai/gcp/client/utils/annotations.py: + id: 9b2cd4ffc6e9 + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + 
pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai/gcp/client/utils/datetimes.py: + id: dd1f0f91ea9d + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/gcp/client/utils/dynamic_imports.py: + id: 0091051cb000 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 + src/mistralai/gcp/client/utils/enums.py: + id: 2341407d5443 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai/gcp/client/utils/eventstreaming.py: + id: bb66f0c3e0dc + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b + src/mistralai/gcp/client/utils/forms.py: + id: ebf34781d6bd + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d + src/mistralai/gcp/client/utils/headers.py: + id: 4c369582903e + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai/gcp/client/utils/logger.py: + id: 082d86b60820 + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai/gcp/client/utils/metadata.py: + id: ff0e832b8b9c + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai/gcp/client/utils/queryparams.py: + id: 133b8408e73e + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai/gcp/client/utils/requestbodies.py: + id: 1be13a660954 + last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + 
pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai/gcp/client/utils/retries.py: + id: 542ebd75b79b + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e + src/mistralai/gcp/client/utils/security.py: + id: 5273152365f4 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed + src/mistralai/gcp/client/utils/serializers.py: + id: a7836e553d41 + last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/gcp/client/utils/unions.py: + id: 8abba1cf1b6d + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e + src/mistralai/gcp/client/utils/unmarshal_json_response.py: + id: d972d22cf934 + last_write_checksum: sha1:5c75fb4ee04ae80a350ceb96abf4e1fdb255ee6c + pristine_git_object: ead3e5a00171b3a97af5112b6cd9ece698ce74f5 + src/mistralai/gcp/client/utils/url.py: + id: 0d311bbcb8f8 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai/gcp/client/utils/values.py: + id: 328207e9ae81 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} + fim_completion_v1_fim_completions_post: + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} +examplesVersion: 1.0.2 +generatedTests: {} +generatedFiles: + - .gitattributes + - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/jsonschema.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/mistralpromptmode.md + - docs/models/prediction.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - 
docs/models/systemmessagecontentchunks.md + - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - py.typed + - pylintrc + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai_gcp/__init__.py + - src/mistralai_gcp/_hooks/__init__.py + - src/mistralai_gcp/_hooks/sdkhooks.py + - src/mistralai_gcp/_hooks/types.py + - src/mistralai_gcp/_version.py + - src/mistralai_gcp/basesdk.py + - src/mistralai_gcp/chat.py + - src/mistralai_gcp/fim.py + - src/mistralai_gcp/httpclient.py + - src/mistralai_gcp/models/__init__.py + - src/mistralai_gcp/models/assistantmessage.py + - src/mistralai_gcp/models/chatcompletionchoice.py + - src/mistralai_gcp/models/chatcompletionrequest.py + - src/mistralai_gcp/models/chatcompletionresponse.py + - src/mistralai_gcp/models/chatcompletionstreamrequest.py + - src/mistralai_gcp/models/completionchunk.py + - src/mistralai_gcp/models/completionevent.py + - src/mistralai_gcp/models/completionresponsestreamchoice.py + - src/mistralai_gcp/models/contentchunk.py + - src/mistralai_gcp/models/deltamessage.py + - src/mistralai_gcp/models/fimcompletionrequest.py + - src/mistralai_gcp/models/fimcompletionresponse.py + - src/mistralai_gcp/models/fimcompletionstreamrequest.py + - src/mistralai_gcp/models/function.py + - src/mistralai_gcp/models/functioncall.py + - src/mistralai_gcp/models/functionname.py + - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/imageurl.py + - 
src/mistralai_gcp/models/imageurlchunk.py + - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/mistralgcperror.py + - src/mistralai_gcp/models/mistralpromptmode.py + - src/mistralai_gcp/models/no_response_error.py + - src/mistralai_gcp/models/prediction.py + - src/mistralai_gcp/models/referencechunk.py + - src/mistralai_gcp/models/responseformat.py + - src/mistralai_gcp/models/responseformats.py + - src/mistralai_gcp/models/responsevalidationerror.py + - src/mistralai_gcp/models/sdkerror.py + - src/mistralai_gcp/models/security.py + - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/systemmessagecontentchunks.py + - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/thinkchunk.py + - src/mistralai_gcp/models/tool.py + - src/mistralai_gcp/models/toolcall.py + - src/mistralai_gcp/models/toolchoice.py + - src/mistralai_gcp/models/toolchoiceenum.py + - src/mistralai_gcp/models/toolmessage.py + - src/mistralai_gcp/models/tooltypes.py + - src/mistralai_gcp/models/usageinfo.py + - src/mistralai_gcp/models/usermessage.py + - src/mistralai_gcp/models/validationerror.py + - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdkconfiguration.py + - src/mistralai_gcp/types/__init__.py + - src/mistralai_gcp/types/basemodel.py + - src/mistralai_gcp/utils/__init__.py + - src/mistralai_gcp/utils/annotations.py + - src/mistralai_gcp/utils/datetimes.py + - src/mistralai_gcp/utils/enums.py + - src/mistralai_gcp/utils/eventstreaming.py + - src/mistralai_gcp/utils/forms.py + - src/mistralai_gcp/utils/headers.py + - src/mistralai_gcp/utils/logger.py + - src/mistralai_gcp/utils/metadata.py + - src/mistralai_gcp/utils/queryparams.py + - src/mistralai_gcp/utils/requestbodies.py + - src/mistralai_gcp/utils/retries.py + - src/mistralai_gcp/utils/security.py + - src/mistralai_gcp/utils/serializers.py + - src/mistralai_gcp/utils/unmarshal_json_response.py + - src/mistralai_gcp/utils/url.py + - src/mistralai_gcp/utils/values.py diff --git 
a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml new file mode 100644 index 00000000..18f4b4d5 --- /dev/null +++ b/packages/gcp/.speakeasy/gen.yaml @@ -0,0 +1,88 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralGCP + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + sdkInitStyle: constructor + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + nameResolutionFeb2025: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + securityFeb2025: true + sharedErrorComponentsApr2025: true + sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true + auth: + oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + versioningStrategy: automatic + persistentEdits: {} + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false +python: + version: 2.0.0b1 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: {} + allowedRedefinedBuiltins: + - id + - object + - input + - dir + asyncMode: both + authors: + - Mistral + baseErrorName: MistralGCPError + clientServerStatusCodesAsErrors: true + constFieldCasing: normal + defaultErrorName: SDKError + description: Python Client SDK for the Mistral AI API in GCP. 
+ enableCustomCodeRegions: false + enumFormat: union + fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true + responseRequiredSep2024: true + flatAdditionalProperties: true + flattenGlobalSecurity: true + flattenRequests: true + flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only + imports: + option: openapi + paths: + callbacks: "" + errors: errors + operations: "" + shared: "" + webhooks: "" + inferUnionDiscriminators: true + inputModelSuffix: input + license: "" + maxMethodParams: 999 + methodArguments: infer-optional-args + moduleName: mistralai.gcp.client + multipartArrayFormat: standard + outputModelSuffix: output + packageManager: uv + packageName: mistralai-gcp + preApplyUnionDiscriminators: true + pytestFilterWarnings: [] + pytestTimeout: 0 + responseFormat: flat + sseFlatResponse: false + templateVersion: v2 + useAsyncHooks: false diff --git a/packages/mistralai_gcp/CONTRIBUTING.md b/packages/gcp/CONTRIBUTING.md similarity index 100% rename from packages/mistralai_gcp/CONTRIBUTING.md rename to packages/gcp/CONTRIBUTING.md diff --git a/packages/gcp/README.md b/packages/gcp/README.md new file mode 100644 index 00000000..5b66766b --- /dev/null +++ b/packages/gcp/README.md @@ -0,0 +1,428 @@ +# Mistral on GCP Python Client + + +**Prerequisites** + +Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). + +To run this locally you will also need to ensure you are authenticated with Google Cloud. 
You can do this by running + +```bash +gcloud auth application-default login +``` + +## SDK Installation + +Install the extras dependencies specific to Google Cloud: + +```bash +pip install mistralai[gcp] +``` + + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +# Synchronous Example +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.gcp.client import MistralGCP + +async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL + s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model="mistral-small-2503") + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + + + +## Available Resources and Operations + +### [chat](docs/sdks/chat/README.md) + +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion + +### [fim](docs/sdks/fim/README.md) + +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. + +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model="mistral-small-2503", + retries=RetryConfig( + "backoff", + BackoffStrategy(1, 50, 1.1, 100), + False + ) +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model="mistral-small-2503", +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client import models + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = None +try: + res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model="mistral-small-2503", + ) + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Override Server URL Per-Client + +The SDK automatically constructs the Vertex AI endpoint from `project_id` and `region`: +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model="mistral-small-2503", +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +import os +from mistralai.gcp.client import MistralGCP +import httpx + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region="us-central1", + client=http_client, +) +``` + +or you could wrap the client with your own custom logic: +```python +from typing import Any, Optional, Union +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, 
+ cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = MistralGCP( + project_id="", + region="us-central1", + async_client=CustomClient(httpx.AsyncClient()), +) +``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | +| --------- | ---- | ----------- | +| `api_key` | http | HTTP Bearer | + +The SDK automatically handles GCP authentication via `google.auth.default()`. Tokens are auto-refreshed when they expire. For example: +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model="mistral-small-2503", +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/gcp/RELEASES.md b/packages/gcp/RELEASES.md new file mode 100644 index 00000000..ec883c62 --- /dev/null +++ b/packages/gcp/RELEASES.md @@ -0,0 +1,21 @@ + + +## 2026-01-12 16:00:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] packages/mistralai_gcp +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp + +## 2026-02-25 17:36:50 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/gcp +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp \ No newline at end of file diff --git a/packages/gcp/USAGE.md b/packages/gcp/USAGE.md new file mode 100644 index 00000000..3156349d --- /dev/null +++ b/packages/gcp/USAGE.md @@ -0,0 +1,61 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +# Synchronous Example +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.gcp.client import MistralGCP + +async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL + s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model="mistral-small-2503") + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/gcp/docs/errors/httpvalidationerror.md similarity index 100% rename from packages/mistralai_gcp/docs/models/httpvalidationerror.md rename to packages/gcp/docs/errors/httpvalidationerror.md diff --git a/packages/mistralai_gcp/docs/models/arguments.md b/packages/gcp/docs/models/arguments.md similarity index 100% rename from packages/mistralai_gcp/docs/models/arguments.md rename to packages/gcp/docs/models/arguments.md diff --git a/packages/gcp/docs/models/assistantmessage.md b/packages/gcp/docs/models/assistantmessage.md new file mode 100644 index 00000000..9ef63837 --- /dev/null +++ b/packages/gcp/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagecontent.md b/packages/gcp/docs/models/assistantmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/assistantmessagecontent.md rename to packages/gcp/docs/models/assistantmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/gcp/docs/models/chatcompletionchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionchoice.md rename to packages/gcp/docs/models/chatcompletionchoice.md diff --git a/packages/gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/gcp/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 00000000..b2f15ecb --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | 
model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionrequest.md b/packages/gcp/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..8dbd4a82 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequest.md @@ -0,0 +1,25 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionrequestmessage.md b/packages/gcp/docs/models/chatcompletionrequestmessage.md new file mode 100644 index 00000000..91e9e062 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionrequeststop.md b/packages/gcp/docs/models/chatcompletionrequeststop.md new file mode 100644 index 00000000..749296d4 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..dc82a8ef --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionresponse.md b/packages/gcp/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..a0465ffb --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionstreamrequest.md b/packages/gcp/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..db76b6c8 --- /dev/null 
+++ b/packages/gcp/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,25 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md new file mode 100644 index 00000000..2e4e93ac --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessage + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionstreamrequeststop.md b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 00000000..a48460a9 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..43f3ca38 --- /dev/null +++ b/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestToolChoice + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/gcp/docs/models/completionchunk.md b/packages/gcp/docs/models/completionchunk.md new file mode 100644 index 00000000..7f8ab5e6 --- /dev/null +++ b/packages/gcp/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` 
| *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionevent.md b/packages/gcp/docs/models/completionevent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/completionevent.md rename to packages/gcp/docs/models/completionevent.md diff --git a/packages/gcp/docs/models/completionresponsestreamchoice.md b/packages/gcp/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/gcp/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md new file mode 100644 index 
00000000..0fece473 --- /dev/null +++ b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/gcp/docs/models/contentchunk.md similarity index 100% rename from packages/mistralai_gcp/docs/models/contentchunk.md rename to packages/gcp/docs/models/contentchunk.md diff --git a/packages/gcp/docs/models/deltamessage.md b/packages/gcp/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/gcp/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/deltamessagecontent.md b/packages/gcp/docs/models/deltamessagecontent.md new file mode 100644 index 00000000..8142772d --- /dev/null +++ b/packages/gcp/docs/models/deltamessagecontent.md @@ -0,0 +1,17 @@ +# DeltaMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values 
here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/gcp/docs/models/fimcompletionrequest.md similarity index 91% rename from packages/mistralai_gcp/docs/models/fimcompletionrequest.md rename to packages/gcp/docs/models/fimcompletionrequest.md index 7507b90c..fde0b625 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/gcp/docs/models/fimcompletionrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md b/packages/gcp/docs/models/fimcompletionrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md rename to packages/gcp/docs/models/fimcompletionrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/gcp/docs/models/fimcompletionresponse.md similarity index 93% rename from packages/mistralai_gcp/docs/models/fimcompletionresponse.md rename to packages/gcp/docs/models/fimcompletionresponse.md index da786a1f..cd62d034 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md +++ b/packages/gcp/docs/models/fimcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff 
--git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/gcp/docs/models/fimcompletionstreamrequest.md similarity index 91% rename from packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md rename to packages/gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..ba62d854 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/gcp/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md b/packages/gcp/docs/models/fimcompletionstreamrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md rename to packages/gcp/docs/models/fimcompletionstreamrequeststop.md diff --git a/packages/gcp/docs/models/function.md b/packages/gcp/docs/models/function.md new file mode 100644 index 00000000..b2bdb3fe --- /dev/null +++ b/packages/gcp/docs/models/function.md @@ -0,0 +1,11 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/functioncall.md b/packages/gcp/docs/models/functioncall.md similarity index 100% rename from packages/mistralai_gcp/docs/models/functioncall.md rename to packages/gcp/docs/models/functioncall.md diff --git a/packages/mistralai_gcp/docs/models/functionname.md b/packages/gcp/docs/models/functionname.md similarity index 100% rename from packages/mistralai_gcp/docs/models/functionname.md rename to packages/gcp/docs/models/functionname.md diff --git a/packages/gcp/docs/models/imagedetail.md b/packages/gcp/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/gcp/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# 
ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurl.md b/packages/gcp/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/gcp/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurlchunk.md b/packages/gcp/docs/models/imageurlchunk.md new file mode 100644 index 00000000..a84dac32 --- /dev/null +++ b/packages/gcp/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Literal["image_url"]* | :heavy_check_mark: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurlunion.md b/packages/gcp/docs/models/imageurlunion.md new file mode 100644 index 00000000..db97130f --- /dev/null +++ b/packages/gcp/docs/models/imageurlunion.md @@ -0,0 +1,17 @@ +# ImageURLUnion + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* 
values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/gcp/docs/models/jsonschema.md b/packages/gcp/docs/models/jsonschema.md new file mode 100644 index 00000000..7ff7c070 --- /dev/null +++ b/packages/gcp/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/loc.md b/packages/gcp/docs/models/loc.md similarity index 100% rename from packages/mistralai_gcp/docs/models/loc.md rename to packages/gcp/docs/models/loc.md diff --git a/packages/gcp/docs/models/mistralpromptmode.md b/packages/gcp/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..c3409d03 --- /dev/null +++ b/packages/gcp/docs/models/mistralpromptmode.md @@ -0,0 +1,12 @@ +# MistralPromptMode + +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/gcp/docs/models/prediction.md b/packages/gcp/docs/models/prediction.md new file mode 100644 index 00000000..fae3c1ca --- /dev/null +++ b/packages/gcp/docs/models/prediction.md @@ -0,0 +1,11 @@ +# Prediction + +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/referencechunk.md b/packages/gcp/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/gcp/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/responseformat.md b/packages/gcp/docs/models/responseformat.md new file mode 100644 index 00000000..5cab22f2 --- /dev/null +++ b/packages/gcp/docs/models/responseformat.md @@ -0,0 +1,11 @@ +# ResponseFormat + +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/responseformats.md b/packages/gcp/docs/models/responseformats.md new file mode 100644 index 00000000..2f5f1e55 --- /dev/null +++ b/packages/gcp/docs/models/responseformats.md @@ -0,0 +1,10 @@ +# ResponseFormats + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/security.md b/packages/gcp/docs/models/security.md similarity index 100% rename from packages/mistralai_gcp/docs/models/security.md rename to packages/gcp/docs/models/security.md diff --git a/packages/gcp/docs/models/systemmessage.md b/packages/gcp/docs/models/systemmessage.md new file mode 100644 index 00000000..10bda10f --- /dev/null +++ b/packages/gcp/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of 
file diff --git a/packages/gcp/docs/models/systemmessagecontent.md b/packages/gcp/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..0c87baf3 --- /dev/null +++ b/packages/gcp/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.SystemMessageContentChunks]` + +```python +value: List[models.SystemMessageContentChunks] = /* values here */ +``` + diff --git a/packages/gcp/docs/models/systemmessagecontentchunks.md b/packages/gcp/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/gcp/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/gcp/docs/models/textchunk.md b/packages/gcp/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/gcp/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/thinkchunk.md b/packages/gcp/docs/models/thinkchunk.md new file mode 100644 index 00000000..b07f598e --- /dev/null +++ b/packages/gcp/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/packages/gcp/docs/models/thinking.md b/packages/gcp/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/packages/gcp/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/packages/gcp/docs/models/tool.md b/packages/gcp/docs/models/tool.md new file mode 100644 index 00000000..fb661f72 --- /dev/null +++ b/packages/gcp/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolcall.md b/packages/gcp/docs/models/toolcall.md new file mode 100644 index 00000000..3819236b --- /dev/null +++ b/packages/gcp/docs/models/toolcall.md @@ -0,0 +1,11 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | 
---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/toolchoice.md b/packages/gcp/docs/models/toolchoice.md new file mode 100644 index 00000000..373046bb --- /dev/null +++ b/packages/gcp/docs/models/toolchoice.md @@ -0,0 +1,11 @@ +# ToolChoice + +ToolChoice is either a ToolChoiceEnum or a ToolChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoiceenum.md b/packages/gcp/docs/models/toolchoiceenum.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolchoiceenum.md rename to packages/gcp/docs/models/toolchoiceenum.md diff --git a/packages/gcp/docs/models/toolmessage.md b/packages/gcp/docs/models/toolmessage.md new file mode 100644 index 00000000..7201481e --- /dev/null +++ b/packages/gcp/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| 
---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagecontent.md b/packages/gcp/docs/models/toolmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolmessagecontent.md rename to packages/gcp/docs/models/toolmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/tooltypes.md b/packages/gcp/docs/models/tooltypes.md similarity index 100% rename from packages/mistralai_gcp/docs/models/tooltypes.md rename to packages/gcp/docs/models/tooltypes.md diff --git a/packages/gcp/docs/models/usageinfo.md b/packages/gcp/docs/models/usageinfo.md new file mode 100644 index 00000000..f5204ac9 --- /dev/null +++ b/packages/gcp/docs/models/usageinfo.md @@ -0,0 +1,12 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/usermessage.md b/packages/gcp/docs/models/usermessage.md 
new file mode 100644 index 00000000..e7a932ed --- /dev/null +++ b/packages/gcp/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagecontent.md b/packages/gcp/docs/models/usermessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/usermessagecontent.md rename to packages/gcp/docs/models/usermessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/utils/retryconfig.md b/packages/gcp/docs/models/utils/retryconfig.md similarity index 100% rename from packages/mistralai_gcp/docs/models/utils/retryconfig.md rename to packages/gcp/docs/models/utils/retryconfig.md diff --git a/packages/mistralai_gcp/docs/models/validationerror.md b/packages/gcp/docs/models/validationerror.md similarity index 100% rename from packages/mistralai_gcp/docs/models/validationerror.md rename to packages/gcp/docs/models/validationerror.md diff --git a/packages/gcp/docs/sdks/chat/README.md b/packages/gcp/docs/sdks/chat/README.md new file mode 100644 index 00000000..a1fdfd9a --- /dev/null +++ b/packages/gcp/docs/sdks/chat/README.md @@ -0,0 +1,127 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. 
+ +### Available Operations + +* [stream](#stream) - Stream chat completion +* [complete](#complete) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.stream(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## complete + +Chat Completion + +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/gcp/docs/sdks/fim/README.md b/packages/gcp/docs/sdks/fim/README.md new file mode 100644 index 00000000..61a28883 --- /dev/null +++ b/packages/gcp/docs/sdks/fim/README.md @@ -0,0 +1,113 @@ +# Fim +(*fim*) + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [stream](#stream) - Stream fim completion +* [complete](#complete) - Fim Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.fim.stream(prompt="def", model="codestral-2", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## complete + +FIM completion. + +### Example Usage + +```python +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.fim.complete(prompt="def", model="codestral-2", suffix="return a+b") + +if res is not None: + # handle response + print(res.choices[0].message.content) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md b/packages/gcp/docs/sdks/mistralgcp/README.md similarity index 100% rename from packages/mistralai_gcp/docs/sdks/mistralgcp/README.md rename to packages/gcp/docs/sdks/mistralgcp/README.md diff --git a/packages/mistralai_gcp/py.typed b/packages/gcp/py.typed similarity index 100% rename from packages/mistralai_gcp/py.typed rename to packages/gcp/py.typed diff --git a/packages/gcp/pylintrc b/packages/gcp/pylintrc new file mode 100644 index 00000000..0391ac11 --- /dev/null +++ b/packages/gcp/pylintrc @@ -0,0 +1,664 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. 
+#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. 
The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. 
+unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. 
If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id, + n + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. 
Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. 
+max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). 
+ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import, + too-many-return-statements, + redefined-builtin + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object,input,dir + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/gcp/pyproject.toml b/packages/gcp/pyproject.toml new file mode 100644 index 00000000..c0497656 --- /dev/null +++ b/packages/gcp/pyproject.toml @@ -0,0 +1,74 @@ +[project] +name = "mistralai-gcp" +version = "2.0.0b1" +description = "Python Client SDK for the Mistral AI API in GCP." +authors = [{ name = "Mistral" }] +requires-python = ">=3.10" +readme = "README.md" +dependencies = [ + "eval-type-backport >=0.2.0", + "google-auth (>=2.31.0,<3.0.0)", + "httpx >=0.28.1", + "pydantic >=2.11.2", + "python-dateutil >=2.8.2", + "requests (>=2.32.3,<3.0.0)", + "typing-inspection >=0.4.0", +] + +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", + "pyright>=1.1.401,<2", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", + "types-python-dateutil>=2.9.0.20240316,<3", +] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/gcp/client/py.typed"] + +[tool.hatch.build.targets.sdist] +include = ["src/mistralai"] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/gcp/client/py.typed" = "src/mistralai/gcp/client/py.typed" + +[tool.hatch.build.targets.wheel] +include = ["src/mistralai"] + +[tool.hatch.build.targets.wheel.sources] +"src" = "" + +[virtualenvs] +in-project = true + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = 
"src" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "google" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" diff --git a/packages/gcp/scripts/prepare_readme.py b/packages/gcp/scripts/prepare_readme.py new file mode 100644 index 00000000..ae27b555 --- /dev/null +++ b/packages/gcp/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/gcp" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/gcp/scripts/publish.sh b/packages/gcp/scripts/publish.sh new file mode 100755 index 00000000..c35748f3 --- /dev/null +++ b/packages/gcp/scripts/publish.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff 
--git a/packages/gcp/src/mistralai/gcp/client/__init__.py b/packages/gcp/src/mistralai/gcp/client/__init__.py new file mode 100644 index 00000000..833c68cd --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/__init__.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py b/packages/gcp/src/mistralai/gcp/client/_hooks/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/__init__.py diff --git a/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py b/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py new file mode 100644 index 00000000..23d3283d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py @@ -0,0 +1,67 @@ +import json +import logging +from .types import BeforeRequestHook, BeforeRequestContext, Hooks +import httpx + +logger = logging.getLogger(__name__) + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +class GCPVertexAIPathHook(BeforeRequestHook): + """Build full Vertex AI URL path from project_id, region, and model. + + Extracts model from request body and builds the Vertex AI URL dynamically. 
+ """ + + def __init__(self, project_id: str, region: str): + self.project_id = project_id + self.region = region + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + if not request.content: + return request + + try: + body = json.loads(request.content.decode("utf-8")) + except (UnicodeDecodeError, json.JSONDecodeError): + # Non-JSON body (e.g. multipart upload) — pass through unmodified + return request + + model = body.get("model") + if not model: + logger.warning( + "GCPVertexAIPathHook: request body has no 'model' field; " + "Vertex AI path will not be constructed. " + "Operation: %s", + hook_ctx.operation_id, + ) + return request + + is_streaming = "stream" in hook_ctx.operation_id.lower() + specifier = "streamRawPredict" if is_streaming else "rawPredict" + + path = ( + f"/v1/projects/{self.project_id}/locations/{self.region}/" + f"publishers/mistralai/models/{model}:{specifier}" + ) + + return httpx.Request( + method=request.method, + url=request.url.copy_with(path=path), + headers=request.headers, + content=request.content, + ) + + +def init_hooks(_hooks: Hooks) -> None: + """Initialize hooks. Called by SDKHooks.__init__. + + Note: GCPVertexAIPathHook requires project_id and region, so it is + registered separately in MistralGCP.__init__ after those values are known. + """ diff --git a/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py b/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..2af4deed --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.gcp.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], 
Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/gcp/src/mistralai/gcp/client/_hooks/types.py b/packages/gcp/src/mistralai/gcp/client/_hooks/types.py new file mode 100644 index 00000000..ea95bed2 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/types.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from abc import ABC, abstractmethod +import httpx +from mistralai.gcp.client.httpclient import HttpClient +from mistralai.gcp.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + 
hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py new file mode 100644 index 00000000..36e44a5e --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai-gcp" +__version__: str = "2.0.0b1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-gcp" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/basesdk.py b/packages/gcp/src/mistralai/gcp/client/basesdk.py new file mode 100644 index 00000000..b3edcb0a --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/basesdk.py @@ -0,0 +1,384 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai.gcp.client import errors, utils +from mistralai.gcp.client._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) +from mistralai.gcp.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse + + +class BaseSDK: + sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. 
+ """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return 
self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + allow_empty_value, + ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). 
+ parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody() + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + 
req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + async def do(): + http_res = None + try: + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) 
+ + if client is None: + raise ValueError("client is required") + + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None + ) + + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/packages/gcp/src/mistralai/gcp/client/chat.py b/packages/gcp/src/mistralai/gcp/client/chat.py new file mode 100644 index 00000000..925d69ed --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/chat.py @@ -0,0 +1,701 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.gcp.client import errors, models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + def stream( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat 
completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + ) + + req = self._build_request( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: 
utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = 
UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
+ :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + ) + + req = self._build_request_async( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( 
+ config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def complete( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, 
+ tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + ) + + req = self._build_request( + method="POST", + path="/rawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + 
frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + ) + + req = self._build_request_async( + method="POST", + path="/rawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/gcp/src/mistralai/gcp/client/errors/__init__.py b/packages/gcp/src/mistralai/gcp/client/errors/__init__.py new file mode 100644 index 00000000..00c8ee00 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .mistralgcperror import MistralGCPError +from typing import Any, TYPE_CHECKING + +from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralGCPError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py b/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py new file mode 100644 index 00000000..59806819 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from dataclasses import dataclass, field +import httpx +from mistralai.gcp.client.errors import MistralGCPError +from mistralai.gcp.client.models import validationerror as models_validationerror +from mistralai.gcp.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[models_validationerror.ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralGCPError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py b/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py new file mode 100644 index 00000000..9de91bf2 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass, field + + +@dataclass(unsafe_hash=True) +class MistralGCPError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) + + def __str__(self): + return self.message diff --git a/packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py b/packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py new file mode 100644 index 00000000..1deab64b --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + object.__setattr__(self, "message", message) + super().__init__(message) + + def __str__(self): + return self.message diff --git a/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py b/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py new file mode 100644 index 00000000..e8bd83c1 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.gcp.client.errors import MistralGCPError + + +@dataclass(unsafe_hash=True) +class ResponseValidationError(MistralGCPError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py b/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py new file mode 100644 index 00000000..69809246 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.gcp.client.errors import MistralGCPError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralGCPError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + + if message: + message += ": " + message += f"Status {raw_response.status_code}" + + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" + + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" + + message += f". Body: {body_display}" + message = message.strip() + + super().__init__(message, raw_response, body) diff --git a/packages/gcp/src/mistralai/gcp/client/fim.py b/packages/gcp/src/mistralai/gcp/client/fim.py new file mode 100644 index 00000000..4202102a --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/fim.py @@ -0,0 +1,533 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.gcp.client import errors, models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + def stream( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/streamRawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): 
+ http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model with FIM to use. 
+ :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/streamRawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, 
"422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def complete( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/rawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + 
raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/rawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + 
errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/gcp/src/mistralai/gcp/client/httpclient.py b/packages/gcp/src/mistralai/gcp/client/httpclient.py new file mode 100644 index 00000000..89560b56 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/httpclient.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +# pyright: reportReturnType = false +import asyncio +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + 
extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + def close(self) -> None: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + async def aclose(self) -> None: + pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. 
+ owner.client = None + owner.async_client = None + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + try: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) + except RuntimeError: + try: + asyncio.run(async_client.aclose()) + except RuntimeError: + # best effort + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/__init__.py b/packages/gcp/src/mistralai/gcp/client/models/__init__.py new file mode 100644 index 00000000..575f6404 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/__init__.py @@ -0,0 +1,367 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, TYPE_CHECKING + +from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + 
ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk + from .deltamessage import ( + DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, + DeltaMessageTypedDict, + ) + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .imagedetail import ImageDetail + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .security import Security, SecurityTypedDict + from .systemmessage import ( + SystemMessage, + 
SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + 
"CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "ContentChunk", + "ContentChunkTypedDict", + "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", + "DeltaMessageTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "ImageDetail", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "Loc", + "LocTypedDict", + "MistralPromptMode", + "Prediction", + "PredictionTypedDict", + "ReferenceChunk", + "ReferenceChunkTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Security", + "SecurityTypedDict", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "UnknownContentChunk", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageTypedDict", + "ValidationError", + 
"ValidationErrorTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": 
".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", + "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "ImageDetail": ".imagedetail", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + 
"Security": ".security", + "SecurityTypedDict": ".security", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py new file mode 100644 index 00000000..702ac470 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) + + +class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + + +class AssistantMessage(BaseModel): + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + content: OptionalNullable[AssistantMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..ae5a2fbf --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.gcp.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +ChatCompletionChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: ChatCompletionChoiceFinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: ChatCompletionChoiceFinishReason diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..8229c5bb --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py @@ -0,0 +1,218 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class ChatCompletionRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..317c4d84 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.gcp.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..3c228d2e --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionStreamRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py new file mode 100644 index 00000000..a0b1ae2f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionevent.py b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py new file mode 100644 index 00000000..bb155009 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.gcp.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..e58d4c88 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py 
b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py new file mode 100644 index 00000000..18d48150 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from functools import partial +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], +) + + +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + +ContentChunk = Annotated[ + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py new file mode 100644 index 00000000..63e6a7f3 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[DeltaMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] 
= UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py new file mode 100644 index 00000000..e460f76c --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py @@ -0,0 +1,129 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py new file mode 100644 index 00000000..5b80da3f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.gcp.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class FIMCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..fffc3054 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/function.py b/packages/gcp/src/mistralai/gcp/client/models/function.py new file mode 100644 index 00000000..439e8313 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/function.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/functioncall.py b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py new file mode 100644 index 00000000..0f1b2425 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/packages/gcp/src/mistralai/gcp/client/models/functionname.py b/packages/gcp/src/mistralai/gcp/client/models/functionname.py new file mode 100644 index 00000000..585b9e39 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py new file mode 100644 index 00000000..68ed7608 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurl.py b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py new file mode 100644 index 00000000..903d0a1a --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imagedetail import ImageDetail +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[ImageDetail]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[ImageDetail] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py new file mode 100644 index 00000000..4bec0eec --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + type: Annotated[ + Annotated[Literal["image_url"], AfterValidator(validate_const("image_url"))], + pydantic.Field(alias="type"), + ] = "image_url" + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py new file mode 100644 index 00000000..684ac09f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py new file mode 100644 index 00000000..c765e4f1 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/packages/gcp/src/mistralai/gcp/client/models/prediction.py b/packages/gcp/src/mistralai/gcp/client/models/prediction.py new file mode 100644 index 00000000..2e325289 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/prediction.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = 
{} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py new file mode 100644 index 00000000..261c4755 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/responseformat.py b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py new file mode 100644 index 00000000..f3aa9930 --- /dev/null +++ 
b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .responseformats import ResponseFormats +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: NotRequired[ResponseFormats] + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] + + +class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: Optional[ResponseFormats] = None + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/responseformats.py b/packages/gcp/src/mistralai/gcp/client/models/responseformats.py new file mode 100644 index 00000000..cbf83ce7 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/responseformats.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/security.py b/packages/gcp/src/mistralai/gcp/client/models/security.py new file mode 100644 index 00000000..10a469b5 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/security.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import FieldMetadata, SecurityMetadata +from typing_extensions import Annotated, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py new file mode 100644 index 00000000..b3795c4b --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], +) + + +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] +) + + +class SystemMessageTypedDict(TypedDict): + content: SystemMessageContentTypedDict + role: Literal["system"] + + +class SystemMessage(BaseModel): + content: SystemMessageContent + + role: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff 
--git a/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..8de71c90 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from pydantic import Field +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[TextChunk, ThinkChunk], Field(discriminator="type") +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py new file mode 100644 index 00000000..69032272 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + type: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py new file mode 100644 index 00000000..33ec8394 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/tool.py b/packages/gcp/src/mistralai/gcp/client/models/tool.py new file mode 100644 index 00000000..670aa81f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/tool.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from .tooltypes import ToolTypes +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[ToolTypes] + + +class Tool(BaseModel): + function: Function + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolcall.py b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py new file mode 100644 index 00000000..3ea8e283 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from .tooltypes import ToolTypes +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + type: NotRequired[ToolTypes] + index: NotRequired[int] + + +class ToolCall(BaseModel): + function: FunctionCall + + id: Optional[str] = "null" + + type: Optional[ToolTypes] = None + + index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py new file mode 100644 index 00000000..6e795fd7 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py new file mode 100644 index 00000000..01f6f677 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py new file mode 100644 index 00000000..ce160391 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) + + +class ToolMessageTypedDict(TypedDict): + content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + + +class ToolMessage(BaseModel): + content: Nullable[ToolMessageContent] + + role: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) + serialized = handler(self) + m = {} + 
+ for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py new file mode 100644 index 00000000..fd1aa13d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py new file mode 100644 index 00000000..cb6feb6e --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/usermessage.py 
b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py new file mode 100644 index 00000000..e237e900 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.gcp.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: Literal["user"] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/validationerror.py b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py new file mode 100644 index 00000000..2d330e9a --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from typing import List, Union +from typing_extensions import TypeAliasType, TypedDict + + +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) + + +Loc = TypeAliasType("Loc", Union[str, int]) + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + + msg: str + + type: str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/py.typed b/packages/gcp/src/mistralai/gcp/client/py.typed similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/py.typed rename to packages/gcp/src/mistralai/gcp/client/py.typed diff --git a/packages/gcp/src/mistralai/gcp/client/sdk.py b/packages/gcp/src/mistralai/gcp/client/sdk.py new file mode 100644 index 00000000..e6e83839 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/sdk.py @@ -0,0 +1,243 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
class MistralGCP(BaseSDK):
    r"""Mistral AI API: Dora OpenAPI schema

    Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.
    """

    chat: "Chat"
    r"""Chat Completion API."""
    fim: "Fim"
    r"""Fill-in-the-middle API."""
    # Lazily-loaded sub-SDK attributes: attribute name -> (module path, class
    # name). Resolved on first access via __getattr__ below and then cached on
    # the instance with setattr.
    _sub_sdk_map = {
        "chat": ("mistralai.gcp.client.chat", "Chat"),
        "fim": ("mistralai.gcp.client.fim", "Fim"),
    }

    def __init__(
        self,
        project_id: Optional[str] = None,
        region: str = "europe-west4",
        access_token: Optional[str] = None,
        server: Optional[str] = None,
        server_url: Optional[str] = None,
        url_params: Optional[Dict[str, str]] = None,
        client: Optional[HttpClient] = None,
        async_client: Optional[AsyncHttpClient] = None,
        retry_config: OptionalNullable[RetryConfig] = UNSET,
        timeout_ms: Optional[int] = None,
        debug_logger: Optional[Logger] = None,
    ) -> None:
        r"""Instantiates the SDK configuring it with the provided parameters.

        :param project_id: GCP project ID (auto-detected from credentials if not provided)
        :param region: GCP region for Vertex AI (default: europe-west4)
        :param access_token: Fixed access token for testing (skips google.auth)
        :param server: The server by name to use for all methods
        :param server_url: The server URL to use for all methods
        :param url_params: Parameters to optionally template the server URL with
        :param client: The HTTP client to use for all synchronous methods
        :param async_client: The Async HTTP client to use for all asynchronous methods
        :param retry_config: The retry configuration to use for all supported methods
        :param timeout_ms: Optional request timeout applied to each operation in milliseconds
        :raises ValueError: if no credentials can be obtained, or if no project
            ID was supplied and none could be detected from the credentials
        """
        # Resolve GCP credentials unless a fixed token was provided (test mode).
        credentials: Optional[google.auth.credentials.Credentials] = None
        if access_token is None:
            creds, detected_project_id = google.auth.default(
                scopes=["https://www.googleapis.com/auth/cloud-platform"],
            )
            if creds is None:
                raise ValueError("Failed to obtain GCP credentials")
            # Cast to Credentials base class which has refresh() and token
            creds = cast(google.auth.credentials.Credentials, creds)
            # Eager refresh so the first request does not pay the round-trip.
            creds.refresh(google.auth.transport.requests.Request())
            credentials = creds
            # Explicit project_id wins over the one detected from credentials.
            project_id = project_id or detected_project_id

        if project_id is None:
            raise ValueError(
                "project_id must be provided or available from default credentials"
            )

        self._credentials = credentials
        self._project_id = project_id
        self._region = region
        self._fixed_access_token = access_token

        def get_auth_token() -> str:
            # Fixed token (testing) short-circuits google.auth entirely.
            if self._fixed_access_token:
                return self._fixed_access_token
            creds = self._credentials
            if creds is None:
                raise ValueError("No credentials available")
            # Only refresh when the token is expired or missing.
            # This avoids a blocking HTTP round-trip on every request and
            # minimises event-loop blocking when called from async paths
            # (the Speakeasy-generated basesdk always calls security
            # callables synchronously).
            if not creds.valid:
                creds.refresh(google.auth.transport.requests.Request())
            token = creds.token
            if token is None:
                raise ValueError("Failed to obtain access token")
            return token

        # Default to the regional Vertex AI endpoint when no URL was supplied.
        if server_url is None:
            server_url = f"https://{region}-aiplatform.googleapis.com"

        # Track whether the caller supplied the clients: SDK-owned clients are
        # closed by __exit__/__aexit__/finalizer; caller-owned clients are not.
        client_supplied = True
        if client is None:
            client = httpx.Client(follow_redirects=True)
            client_supplied = False

        assert issubclass(
            type(client), HttpClient
        ), "The provided client must implement the HttpClient protocol."

        async_client_supplied = True
        if async_client is None:
            async_client = httpx.AsyncClient(follow_redirects=True)
            async_client_supplied = False

        if debug_logger is None:
            debug_logger = get_default_logger()

        assert issubclass(
            type(async_client), AsyncHttpClient
        ), "The provided async_client must implement the AsyncHttpClient protocol."

        def get_security() -> models.Security:
            # Security is a callable so each request picks up a fresh token.
            return models.Security(api_key=get_auth_token())

        security: Callable[[], models.Security] = get_security

        if url_params is not None:
            server_url = utils.template_url(server_url, url_params)

        BaseSDK.__init__(
            self,
            SDKConfiguration(
                client=client,
                client_supplied=client_supplied,
                async_client=async_client,
                async_client_supplied=async_client_supplied,
                security=security,
                server_url=server_url,
                server=server,
                retry_config=retry_config,
                timeout_ms=timeout_ms,
                debug_logger=debug_logger,
            ),
            parent_ref=self,
        )

        hooks = SDKHooks()
        # Bypass any __setattr__ on the configuration object when stashing hooks.
        self.sdk_configuration.__dict__["_hooks"] = hooks

        # Register hook that builds Vertex AI URL path
        hooks.register_before_request_hook(GCPVertexAIPathHook(project_id, region))

        # Give hooks a chance to rewrite the server URL / wrap the client.
        current_server_url, *_ = self.sdk_configuration.get_server_details()
        server_url, self.sdk_configuration.client = hooks.sdk_init(
            current_server_url, client
        )
        if current_server_url != server_url:
            self.sdk_configuration.server_url = server_url

        # Close SDK-owned HTTP clients when this SDK instance is garbage
        # collected (no-op for caller-supplied clients).
        weakref.finalize(
            self,
            close_clients,
            cast(ClientOwner, self.sdk_configuration),
            self.sdk_configuration.client,
            self.sdk_configuration.client_supplied,
            self.sdk_configuration.async_client,
            self.sdk_configuration.async_client_supplied,
        )

    def dynamic_import(self, modname, retries=3):
        """Import ``modname``, retrying to work around half-initialized modules.

        A concurrent first import can leave a partially initialized entry in
        ``sys.modules``; on failure the stale entry is dropped and the import
        retried up to ``retries`` times.

        :raises ImportError: if the module still cannot be imported after all
            attempts (chained from the last underlying error).
        """
        last_exc: Optional[Exception] = None
        for attempt in range(retries):
            try:
                return importlib.import_module(modname)
            except (KeyError, ImportError, ModuleNotFoundError) as e:
                last_exc = e
                # Clear any half-initialized module and retry
                sys.modules.pop(modname, None)
                if attempt == retries - 1:
                    break
        raise ImportError(
            f"Failed to import module '{modname}' after {retries} attempts"
        ) from last_exc

    def __getattr__(self, name: str):
        """Lazily instantiate sub-SDKs (``chat``/``fim``) on first access.

        The created instance is cached on ``self`` so subsequent lookups do
        not re-enter __getattr__.
        """
        if name in self._sub_sdk_map:
            module_path, class_name = self._sub_sdk_map[name]
            try:
                module = self.dynamic_import(module_path)
                klass = getattr(module, class_name)
                instance = klass(self.sdk_configuration, parent_ref=self)
                setattr(self, name, instance)
                return instance
            except ImportError as e:
                raise AttributeError(
                    f"Failed to import module {module_path} for attribute {name}: {e}"
                ) from e
            except AttributeError as e:
                raise AttributeError(
                    f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}"
                ) from e

        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{name}'"
        )

    def __dir__(self):
        """Include the lazily-loaded sub-SDK names in dir() output."""
        default_attrs = list(super().__dir__())
        lazy_attrs = list(self._sub_sdk_map.keys())
        return sorted(list(set(default_attrs + lazy_attrs)))

    def __enter__(self):
        return self

    async def __aenter__(self):
        return self

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        # Only close the sync client if the SDK created it itself.
        if (
            self.sdk_configuration.client is not None
            and not self.sdk_configuration.client_supplied
        ):
            self.sdk_configuration.client.close()
            self.sdk_configuration.client = None

    async def __aexit__(self, _exc_type, _exc_val, _exc_tb):
        # Only close the async client if the SDK created it itself.
        if (
            self.sdk_configuration.async_client is not None
            and not self.sdk_configuration.async_client_supplied
        ):
            await self.sdk_configuration.async_client.aclose()
            self.sdk_configuration.async_client = None
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from ._version import (
    __gen_version__,
    __openapi_doc_version__,
    __user_agent__,
    __version__,
)
from .httpclient import AsyncHttpClient, HttpClient
from .utils import Logger, RetryConfig, remove_suffix
from dataclasses import dataclass, field
from mistralai.gcp.client import models
from mistralai.gcp.client.types import OptionalNullable, UNSET
from pydantic import Field  # retained so the module's import surface is unchanged
from typing import Callable, Dict, Optional, Tuple, Union


SERVER_EU = "eu"
r"""EU Production server"""
SERVERS = {
    SERVER_EU: "https://api.mistral.ai",
}
"""Contains the list of servers available to the SDK"""


@dataclass
class SDKConfiguration:
    """Shared runtime configuration for every SDK operation.

    Holds the HTTP clients (and whether the caller supplied them), the
    security source, server selection, retry/timeout policy, and version
    metadata used for the user agent.
    """

    client: Union[HttpClient, None]
    client_supplied: bool
    async_client: Union[AsyncHttpClient, None]
    async_client_supplied: bool
    debug_logger: Logger
    security: Optional[Union[models.Security, Callable[[], models.Security]]] = None
    server_url: Optional[str] = ""
    server: Optional[str] = ""
    language: str = "python"
    openapi_doc_version: str = __openapi_doc_version__
    sdk_version: str = __version__
    gen_version: str = __gen_version__
    user_agent: str = __user_agent__
    # Bug fix: this is a *stdlib* dataclass, so a pydantic ``Field(...)``
    # default is not interpreted — the attribute default would literally be a
    # FieldInfo object instead of UNSET. ``dataclasses.field`` with a
    # default_factory yields the actual UNSET sentinel.
    retry_config: OptionalNullable[RetryConfig] = field(default_factory=lambda: UNSET)
    timeout_ms: Optional[int] = None

    def get_server_details(self) -> Tuple[str, Dict[str, str]]:
        """Return the effective base URL and its URL template parameters.

        An explicit ``server_url`` (trailing slash stripped) takes precedence
        over the named ``server``; when neither is set, the EU production
        server is selected and remembered on the instance.

        :raises ValueError: if ``server`` names an unknown server.
        """
        if self.server_url is not None and self.server_url:
            return remove_suffix(self.server_url, "/"), {}
        if not self.server:
            # Default and persist the choice so later calls are consistent.
            self.server = SERVER_EU

        if self.server not in SERVERS:
            raise ValueError(f'Invalid server "{self.server}"')

        return SERVERS[self.server], {}
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from pydantic import ConfigDict, model_serializer
from pydantic import BaseModel as PydanticBaseModel
from pydantic_core import core_schema
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union
from typing_extensions import TypeAliasType, TypeAlias


class BaseModel(PydanticBaseModel):
    # Shared base for all generated models: accept population by field name
    # (not just alias), allow arbitrary types, and disable the "model_"
    # protected-namespace warnings that generated field names can trigger.
    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=()
    )


class Unset(BaseModel):
    # Sentinel model distinguishing "not provided" from an explicit None.
    @model_serializer(mode="plain")
    def serialize_model(self):
        # Serialize to a recognizable marker string so serializers can strip
        # unset fields from the output payload.
        return UNSET_SENTINEL

    def __bool__(self) -> Literal[False]:
        # Unset is always falsy, so `if value:` treats it like a missing value.
        return False


UNSET = Unset()
UNSET_SENTINEL = "~?~unset~?~sentinel~?~"


T = TypeVar("T")
if TYPE_CHECKING:
    # Plain aliases for type checkers.
    Nullable: TypeAlias = Union[T, None]
    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
else:
    # Runtime TypeAliasType keeps the aliases usable in pydantic schemas.
    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
    OptionalNullable = TypeAliasType(
        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
    )


class UnrecognizedStr(str):
    # Fallback string type for open enums: matches only in lax validation so
    # more specific union members (e.g. Literals) are preferred.
    @classmethod
    def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema:
        # Make UnrecognizedStr only work in lax mode, not strict mode
        # This makes it a "fallback" option when more specific types (like Literals) don't match
        def validate_lax(v: Any) -> 'UnrecognizedStr':
            if isinstance(v, cls):
                return v
            return cls(str(v))

        # Use lax_or_strict_schema where strict always fails
        # This forces Pydantic to prefer other union members in strict mode
        # and only fall back to UnrecognizedStr in lax mode
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema([
                core_schema.str_schema(),
                core_schema.no_info_plain_validator_function(validate_lax)
            ]),
            strict_schema=core_schema.none_schema(),  # Always fails in strict mode
        )


class UnrecognizedInt(int):
    # Fallback integer type for open enums; same lax-only strategy as
    # UnrecognizedStr above.
    @classmethod
    def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema:
        # Make UnrecognizedInt only work in lax mode, not strict mode
        # This makes it a "fallback" option when more specific types (like Literals) don't match
        def validate_lax(v: Any) -> 'UnrecognizedInt':
            if isinstance(v, cls):
                return v
            return cls(int(v))
        return core_schema.lax_or_strict_schema(
            lax_schema=core_schema.chain_schema([
                core_schema.int_schema(),
                core_schema.no_info_plain_validator_function(validate_lax)
            ]),
            strict_schema=core_schema.none_schema(),  # Always fails in strict mode
        )
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Any, TYPE_CHECKING, Callable, TypeVar
import asyncio

from .dynamic_imports import lazy_getattr, lazy_dir

_T = TypeVar("_T")


async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T:
    """Run a synchronous function in a thread pool to avoid blocking the event loop."""
    # asyncio.to_thread requires Python 3.9+ (this package targets 3.10+).
    return await asyncio.to_thread(func, *args)


# The imports below exist only for type checkers; at runtime every name is
# resolved lazily through the PEP 562 module __getattr__ defined at the bottom,
# keeping `import mistralai.gcp.client.utils` cheap.
if TYPE_CHECKING:
    from .annotations import get_discriminator
    from .datetimes import parse_datetime
    from .enums import OpenEnumMeta
    from .unions import parse_open_union
    from .headers import get_headers, get_response_headers
    from .metadata import (
        FieldMetadata,
        find_metadata,
        FormMetadata,
        HeaderMetadata,
        MultipartFormMetadata,
        PathParamMetadata,
        QueryParamMetadata,
        RequestMetadata,
        SecurityMetadata,
    )
    from .queryparams import get_query_params
    from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig
    from .requestbodies import serialize_request_body, SerializedRequestBody
    from .security import get_security
    from .serializers import (
        get_pydantic_model,
        marshal_json,
        unmarshal,
        unmarshal_json,
        serialize_decimal,
        serialize_float,
        serialize_int,
        stream_to_text,
        stream_to_text_async,
        stream_to_bytes,
        stream_to_bytes_async,
        validate_const,
        validate_decimal,
        validate_float,
        validate_int,
    )
    from .url import generate_url, template_url, remove_suffix
    from .values import (
        get_global_from_env,
        match_content_type,
        match_status_codes,
        match_response,
        cast_partial,
    )
    from .logger import Logger, get_body_content, get_default_logger

__all__ = [
    "BackoffStrategy",
    "FieldMetadata",
    "find_metadata",
    "FormMetadata",
    "generate_url",
    "get_body_content",
    "get_default_logger",
    "get_discriminator",
    "parse_datetime",
    "get_global_from_env",
    "get_headers",
    "get_pydantic_model",
    "get_query_params",
    "get_response_headers",
    "get_security",
    "HeaderMetadata",
    "Logger",
    "marshal_json",
    "match_content_type",
    "match_status_codes",
    "match_response",
    "MultipartFormMetadata",
    "OpenEnumMeta",
    "parse_open_union",
    "PathParamMetadata",
    "QueryParamMetadata",
    "remove_suffix",
    "Retries",
    "retry",
    "retry_async",
    "RetryConfig",
    "RequestMetadata",
    "SecurityMetadata",
    "serialize_decimal",
    "serialize_float",
    "serialize_int",
    "serialize_request_body",
    "SerializedRequestBody",
    "stream_to_text",
    "stream_to_text_async",
    "stream_to_bytes",
    "stream_to_bytes_async",
    "template_url",
    "unmarshal",
    "unmarshal_json",
    "validate_decimal",
    "validate_const",
    "validate_float",
    "validate_int",
    "cast_partial",
]

# Maps each lazily exported name to the relative submodule that defines it;
# consumed by lazy_getattr/lazy_dir below. Keep in sync with __all__.
_dynamic_imports: dict[str, str] = {
    "BackoffStrategy": ".retries",
    "FieldMetadata": ".metadata",
    "find_metadata": ".metadata",
    "FormMetadata": ".metadata",
    "generate_url": ".url",
    "get_body_content": ".logger",
    "get_default_logger": ".logger",
    "get_discriminator": ".annotations",
    "parse_datetime": ".datetimes",
    "get_global_from_env": ".values",
    "get_headers": ".headers",
    "get_pydantic_model": ".serializers",
    "get_query_params": ".queryparams",
    "get_response_headers": ".headers",
    "get_security": ".security",
    "HeaderMetadata": ".metadata",
    "Logger": ".logger",
    "marshal_json": ".serializers",
    "match_content_type": ".values",
    "match_status_codes": ".values",
    "match_response": ".values",
    "MultipartFormMetadata": ".metadata",
    "OpenEnumMeta": ".enums",
    "parse_open_union": ".unions",
    "PathParamMetadata": ".metadata",
    "QueryParamMetadata": ".metadata",
    "remove_suffix": ".url",
    "Retries": ".retries",
    "retry": ".retries",
    "retry_async": ".retries",
    "RetryConfig": ".retries",
    "RequestMetadata": ".metadata",
    "SecurityMetadata": ".metadata",
    "serialize_decimal": ".serializers",
    "serialize_float": ".serializers",
    "serialize_int": ".serializers",
    "serialize_request_body": ".requestbodies",
    "SerializedRequestBody": ".requestbodies",
    "stream_to_text": ".serializers",
    "stream_to_text_async": ".serializers",
    "stream_to_bytes": ".serializers",
    "stream_to_bytes_async": ".serializers",
    "template_url": ".url",
    "unmarshal": ".serializers",
    "unmarshal_json": ".serializers",
    "validate_decimal": ".serializers",
    "validate_const": ".serializers",
    "validate_float": ".serializers",
    "validate_int": ".serializers",
    "cast_partial": ".values",
}


def __getattr__(attr_name: str) -> Any:
    # PEP 562 hook: resolve exported names on first use from _dynamic_imports.
    return lazy_getattr(
        attr_name, package=__package__, dynamic_imports=_dynamic_imports
    )


def __dir__():
    # Advertise the lazily available names to dir()/autocomplete.
    return lazy_dir(dynamic_imports=_dynamic_imports)
+ """ + upper_fieldname = fieldname.upper() + + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" + + if isinstance(field, dict): + if key in field: + return f"{field[key]}" + + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + return None + + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None + + if isinstance(model, list): + for field in model: + discriminator = search_nested_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = search_nested_discriminator(model) + if discriminator is not None: + return discriminator + + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py b/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy 
def parse_datetime(datetime_string: str) -> datetime:
    """
    Parse an RFC 3339 / ISO 8601 timestamp string into a ``datetime``.

    Python 3.11+ parses RFC 3339 (including the trailing ``Z``) directly via
    ``datetime.fromisoformat``; older interpreters reject the ``Z`` suffix,
    so it is rewritten to the equivalent ``+00:00`` offset first.
    """
    if sys.version_info >= (3, 11):
        # Modern interpreters handle RFC 3339 natively.
        return datetime.fromisoformat(datetime_string)

    normalized = datetime_string
    if normalized.endswith("Z"):
        # fromisoformat() on <= 3.10 chokes on the Zulu suffix.
        normalized = normalized[:-1] + "+00:00"
    return datetime.fromisoformat(normalized)
def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None):
    """Module-level ``__getattr__`` that resolves names from a lazy-import map.

    Args:
        attr_name: The attribute being looked up.
        package: The caller's ``__package__`` (anchor for relative imports).
        dynamic_imports: Dict mapping attribute names to relative module paths.
        sub_packages: Optional list of subpackage names to lazy-load.
    """
    target_module = dynamic_imports.get(attr_name)
    if target_module is not None:
        # Known export: import its defining submodule, then pull the name off it.
        try:
            loaded = dynamic_import(package, target_module)
            return getattr(loaded, attr_name)
        except ImportError as exc:
            raise ImportError(
                f"Failed to import {attr_name} from {target_module}: {exc}"
            ) from exc
        except AttributeError as exc:
            raise AttributeError(
                f"Failed to get {attr_name} from {target_module}: {exc}"
            ) from exc

    # Otherwise the name may be a whole lazily-loaded subpackage.
    if sub_packages and attr_name in sub_packages:
        return import_module(f".{attr_name}", package)

    raise AttributeError(f"module '{package}' has no attribute '{attr_name}'")


def lazy_dir(*, dynamic_imports, sub_packages=None):
    """Module-level ``__dir__`` listing every lazily-loadable attribute."""
    # builtins.* guards against the module itself lazily exporting `list`/`sorted`.
    names = builtins.list(dynamic_imports.keys())
    names.extend(sub_packages or ())
    return builtins.sorted(names)
class OpenEnumMeta(enum.EnumMeta):
    """Metaclass for "open" enums: unknown values are passed through instead of
    raising, while known values still resolve to enum members."""

    # The __call__ method `boundary` kwarg was added in 3.11 and must be present
    # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622
    # pylint: disable=unexpected-keyword-arg
    # The __call__ method `values` varg must be named for pyright.
    # pylint: disable=keyword-arg-before-vararg

    if sys.version_info >= (3, 11):

        def __call__(
            cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # Functional-API invocation (creating a new enum class): defer to
            # EnumMeta unchanged.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )

            # Member lookup: fall back to the raw value when it is not a
            # recognized member (the "open" behavior).
            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    *values,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                    boundary=boundary,
                )
            except ValueError:
                return value
    else:

        def __call__(
            cls, value, names=None, *, module=None, qualname=None, type=None, start=1
        ):
            # The `type` kwarg also happens to be a built-in that pylint flags as
            # redeclared. Safe to ignore this lint rule with this scope.
            # pylint: disable=redefined-builtin

            # Functional-API invocation: defer to EnumMeta unchanged.
            if names is not None:
                return super().__call__(
                    value,
                    names=names,
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )

            # Member lookup with raw-value fallback for unknown values.
            try:
                return super().__call__(
                    value,
                    names=names,  # pyright: ignore[reportArgumentType]
                    module=module,
                    qualname=qualname,
                    type=type,
                    start=start,
                )
            except ValueError:
                return value

    def __new__(mcs, name, bases, namespace, **kwargs):
        """Create the enum class and attach a pydantic core schema that mirrors
        the open-enum semantics during validation."""
        cls = super().__new__(mcs, name, bases, namespace, **kwargs)

        # Add __get_pydantic_core_schema__ to make open enums work correctly
        # in union discrimination. In strict mode (used by Pydantic for unions),
        # only known enum values match. In lax mode, unknown values are accepted.
        def __get_pydantic_core_schema__(
            cls_inner: Any, _source_type: Any, _handler: Any
        ) -> core_schema.CoreSchema:
            # Create a validator that only accepts known enum values (for strict mode)
            def validate_strict(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                # Use the parent EnumMeta's __call__ which raises ValueError for unknown values
                return enum.EnumMeta.__call__(cls_inner, v)

            # Create a lax validator that accepts unknown values
            def validate_lax(v: Any) -> Any:
                if isinstance(v, cls_inner):
                    return v
                try:
                    return enum.EnumMeta.__call__(cls_inner, v)
                except ValueError:
                    # Return the raw value for unknown enum values
                    return v

            # Determine the base type schema (str or int)
            # NOTE: walk the MRO and take whichever of int/str appears first.
            is_int_enum = False
            for base in cls_inner.__mro__:
                if base is int:
                    is_int_enum = True
                    break
                if base is str:
                    break

            base_schema = (
                core_schema.int_schema()
                if is_int_enum
                else core_schema.str_schema()
            )

            # Use lax_or_strict_schema:
            # - strict mode: only known enum values match (raises ValueError for unknown)
            # - lax mode: accept any value, return enum member or raw value
            return core_schema.lax_or_strict_schema(
                lax_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_lax)]
                ),
                strict_schema=core_schema.chain_schema(
                    [base_schema, core_schema.no_info_plain_validator_function(validate_strict)]
                ),
            )

        setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__))
        return cls
class EventStream(Generic[T]):
    """Synchronous iterator over decoded server-sent events.

    Usable as a context manager; leaving the context closes the underlying
    response and permanently stops iteration.
    """

    # Holds a reference to the SDK client so it is not garbage collected,
    # which would tear down the underlying httpx client mid-stream.
    client_ref: Optional[object]
    response: httpx.Response
    generator: Generator[T, None, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self._closed = False
        self.client_ref = client_ref
        self.response = response
        # Events are pulled lazily from the response body as iteration proceeds.
        self.generator = stream_events(response, decoder, sentinel)

    def __iter__(self):
        return self

    def __next__(self):
        if self._closed:
            raise StopIteration
        return next(self.generator)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Mark closed first so any subsequent __next__ stops immediately.
        self._closed = True
        self.response.close()
class EventStreamAsync(Generic[T]):
    # Async counterpart of EventStream: iterates decoded server-sent events
    # and doubles as an async context manager.
    # Holds a reference to the SDK client to avoid it being garbage collected
    # and cause termination of the underlying httpx client.
    client_ref: Optional[object]
    response: httpx.Response
    generator: AsyncGenerator[T, None]
    _closed: bool

    def __init__(
        self,
        response: httpx.Response,
        decoder: Callable[[str], T],
        sentinel: Optional[str] = None,
        client_ref: Optional[object] = None,
    ):
        self.response = response
        # Events are pulled lazily from the response body as iteration proceeds.
        self.generator = stream_events_async(response, decoder, sentinel)
        self.client_ref = client_ref
        self._closed = False

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self._closed:
            raise StopAsyncIteration
        return await self.generator.__anext__()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Mark closed first so any subsequent __anext__ stops immediately.
        self._closed = True
        await self.response.aclose()


@dataclass
class ServerEvent:
    # One parsed SSE message (id/event/data/retry per the SSE wire format).
    id: Optional[str] = None
    event: Optional[str] = None
    data: Any = None
    retry: Optional[int] = None


# All newline pairs that can terminate an SSE message, longest first so the
# longest match wins when peeking at the buffer.
MESSAGE_BOUNDARIES = [
    b"\r\n\r\n",
    b"\r\n\r",
    b"\r\n\n",
    b"\r\r\n",
    b"\n\r\n",
    b"\r\r",
    b"\n\r",
    b"\n\n",
]

UTF8_BOM = b"\xef\xbb\xbf"


async def stream_events_async(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> AsyncGenerator[T, None]:
    # Incrementally buffer the response body, splitting on SSE message
    # boundaries and yielding each decoded event as soon as it is complete.
    buffer = bytearray()
    position = 0
    event_id: Optional[str] = None
    async for chunk in response.aiter_bytes():
        # Strip a UTF-8 BOM from the very start of the stream only.
        if len(buffer) == 0 and chunk.startswith(UTF8_BOM):
            chunk = chunk[len(UTF8_BOM) :]
        buffer += chunk
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            # A full message ends at i; parse it and advance past the boundary.
            block = buffer[position:i]
            position = i + len(seq)
            event, discard, event_id = _parse_event(
                raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id
            )
            if event is not None:
                yield event
            if discard:
                # Sentinel seen: terminate the stream and close the response.
                await response.aclose()
                return

        # Drop consumed bytes to keep the buffer small.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush any trailing message that was not newline-terminated.
    event, discard, _ = _parse_event(
        raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id
    )
    if event is not None:
        yield event
def stream_events(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> Generator[T, None, None]:
    # Synchronous twin of stream_events_async: buffer the body, split on SSE
    # message boundaries, and yield each decoded event.
    buffer = bytearray()
    position = 0
    event_id: Optional[str] = None
    for chunk in response.iter_bytes():
        # Strip a UTF-8 BOM from the very start of the stream only.
        if len(buffer) == 0 and chunk.startswith(UTF8_BOM):
            chunk = chunk[len(UTF8_BOM) :]
        buffer += chunk
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            # A full message ends at i; parse it and advance past the boundary.
            block = buffer[position:i]
            position = i + len(seq)
            event, discard, event_id = _parse_event(
                raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id
            )
            if event is not None:
                yield event
            if discard:
                # Sentinel seen: terminate the stream and close the response.
                response.close()
                return

        # Drop consumed bytes to keep the buffer small.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush any trailing message that was not newline-terminated.
    event, discard, _ = _parse_event(
        raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id
    )
    if event is not None:
        yield event


def _parse_event(
    *,
    raw: bytearray,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
    event_id: Optional[str] = None,
) -> Tuple[Optional[T], bool, Optional[str]]:
    # Parse one raw SSE message block into a decoded event.
    # Returns (event_or_None, discard_stream, last_seen_event_id).
    block = raw.decode()
    lines = re.split(r"\r?\n|\r", block)
    publish = False
    event = ServerEvent()
    data = ""
    for line in lines:
        if not line:
            continue

        # Lines starting with ':' are SSE comments and are skipped.
        delim = line.find(":")
        if delim == 0:
            continue

        # A line without ':' is a field name with an empty value.
        field = line
        value = ""
        if delim > 0:
            field = line[0:delim]
            value = line[delim + 1 :] if delim < len(line) - 1 else ""
            # One leading space after ':' is part of the delimiter, not the value.
            if len(value) and value[0] == " ":
                value = value[1:]

        if field == "event":
            event.event = value
            publish = True
        elif field == "data":
            # Multiple data lines accumulate, newline-joined.
            data += value + "\n"
            publish = True
        elif field == "id":
            publish = True
            # Per the SSE spec, ids containing NUL are ignored.
            if "\x00" not in value:
                event_id = value
        elif field == "retry":
            if value.isdigit():
                event.retry = int(value)
            publish = True

    # The last seen id sticks across messages until replaced.
    event.id = event_id

    # A data payload matching the sentinel (e.g. "[DONE]") ends the stream.
    if sentinel and data == f"{sentinel}\n":
        return None, True, event_id

    if data:
        # Drop the trailing newline added during accumulation.
        data = data[:-1]
        try:
            event.data = json.loads(data)
        except json.JSONDecodeError:
            # Non-JSON payloads are passed through as plain text.
            event.data = data

    out = None
    if publish:
        # Re-serialize the event (omitting unset fields) for the decoder.
        out_dict = {
            k: v
            for k, v in asdict(event).items()
            if v is not None or (k == "data" and data)
        }
        out = decoder(json.dumps(out_dict))

    return out, False, event_id
"id": + publish = True + if "\x00" not in value: + event_id = value + elif field == "retry": + if value.isdigit(): + event.retry = int(value) + publish = True + + event.id = event_id + + if sentinel and data == f"{sentinel}\n": + return None, True, event_id + + if data: + data = data[:-1] + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data + + out = None + if publish: + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/packages/gcp/src/mistralai/gcp/client/utils/forms.py b/packages/gcp/src/mistralai/gcp/client/utils/forms.py new file mode 100644 index 00000000..1e550bd5 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/forms.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
def _populate_form(
    field_name: str,
    explode: bool,
    obj: Any,
    delimiter: str,
    form: Dict[str, List[str]],
):
    """Serialize ``obj`` into ``form`` under ``field_name`` using form-style
    encoding.

    With ``explode=True`` each member becomes its own form field; otherwise
    members are joined with ``delimiter`` into a single value. Handles
    pydantic models (by field alias), dicts, lists, and plain scalars.
    Mutates and returns ``form``; unset values are skipped entirely.
    """
    if not _is_set(obj):
        return form

    if isinstance(obj, BaseModel):
        items = []

        obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for name in obj_fields:
            obj_field = obj_fields[name]
            # Prefer the wire alias over the Python attribute name.
            obj_field_name = obj_field.alias if obj_field.alias is not None else name
            if obj_field_name == "":
                continue

            val = getattr(obj, name)
            if not _is_set(val):
                continue

            if explode:
                form[obj_field_name] = [_val_to_string(val)]
            else:
                items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}")

        if items:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, Dict):
        items = []
        for key, value in obj.items():
            if not _is_set(value):
                continue

            if explode:
                form[key] = [_val_to_string(value)]
            else:
                items.append(f"{key}{delimiter}{_val_to_string(value)}")

        if items:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, List):
        items = []

        for value in obj:
            if not _is_set(value):
                continue

            if explode:
                # Repeated field: accumulate one entry per list element.
                form.setdefault(field_name, []).append(_val_to_string(value))
            else:
                items.append(_val_to_string(value))

        if items:
            form[field_name] = [delimiter.join([str(item) for item in items])]
    else:
        # Scalar value: single stringified entry.
        form[field_name] = [_val_to_string(obj)]

    return form
file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: + form: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if not _is_set(val): + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias else name + + if field_metadata.file: + if isinstance(val, List): + # Handle array of files + array_field_name = f_name + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties( + file_obj + ) + + if content_type is not None: + files.append( + (array_field_name, (file_name, content, content_type)) + ) + else: + files.append((array_field_name, (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) + + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) + else: + 
files.append((f_name, (file_name, content))) + elif field_metadata.json: + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if not _is_set(value): + continue + values.append(_val_to_string(value)) + + array_field_name = f_name + form[array_field_name] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if not _is_set(val): + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + if _is_set(value): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py b/packages/gcp/src/mistralai/gcp/client/utils/headers.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py rename to packages/gcp/src/mistralai/gcp/client/utils/headers.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py b/packages/gcp/src/mistralai/gcp/client/utils/logger.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py rename 
to packages/gcp/src/mistralai/gcp/client/utils/logger.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py b/packages/gcp/src/mistralai/gcp/client/utils/metadata.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py rename to packages/gcp/src/mistralai/gcp/client/utils/metadata.py diff --git a/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py b/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py new file mode 100644 index 00000000..c04e0db8 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py @@ -0,0 +1,217 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) + if _is_set(gbls): + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = 
get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if _is_set(query_params) else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj): + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + 
prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj) or not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if not _is_set(obj_param_metadata): + continue + + obj_val = getattr(obj, name) + if not _is_set(obj_val): + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for key, val in value.items(): + if not _is_set(val): + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for val in value: + if not _is_set(val): + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + 
field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py b/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py new file mode 100644 index 00000000..1de32b6d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: Optional[str] = None + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"^multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif 
re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/packages/gcp/src/mistralai/gcp/client/utils/retries.py b/packages/gcp/src/mistralai/gcp/client/utils/retries.py new file mode 100644 index 00000000..af07d4e9 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/retries.py @@ -0,0 +1,271 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + 
response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except 
TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/packages/gcp/src/mistralai/gcp/client/utils/security.py b/packages/gcp/src/mistralai/gcp/client/utils/security.py new file mode 100644 index 00000000..17996bd5 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/security.py @@ -0,0 +1,176 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: 
Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value + elif sub_type == "custom": + return + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise 
ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/gcp/src/mistralai/gcp/client/utils/serializers.py b/packages/gcp/src/mistralai/gcp/client/utils/serializers.py new file mode 100644 index 00000000..14321eb4 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/serializers.py @@ -0,0 +1,229 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if 
not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unions.py b/packages/gcp/src/mistralai/gcp/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..ead3e5a0 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.gcp.client import errors + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... 
+ + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise errors.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py b/packages/gcp/src/mistralai/gcp/client/utils/url.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/url.py rename to packages/gcp/src/mistralai/gcp/client/utils/url.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/gcp/src/mistralai/gcp/client/utils/values.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/values.py rename to packages/gcp/src/mistralai/gcp/client/utils/values.py diff --git a/packages/gcp/uv.lock b/packages/gcp/uv.lock new file mode 100644 index 00000000..9bd9f9b6 --- /dev/null +++ b/packages/gcp/uv.lock @@ -0,0 +1,774 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = 
"sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = 
"sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = "https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = "https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = "2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" }, + { url = "https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = 
"https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = "https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = "https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = "https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = "https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = 
"https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = 
"https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = 
"2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, 
upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" 
}, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "google-auth" +version = "2.45.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = 
"sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" 
+version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = 
"sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mistralai-gcp" +version = "2.0.0b1" +source = { editable = "." 
} +dependencies = [ + { name = "eval-type-backport" }, + { name = "google-auth" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "typing-inspection" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, + { name = "pylint" }, + { name = "pyright" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "types-python-dateutil" }, +] + +[package.metadata] +requires-dist = [ + { name = "eval-type-backport", specifier = ">=0.2.0" }, + { name = "google-auth", specifier = ">=2.31.0,<3.0.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.11.2" }, + { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "requests", specifier = ">=2.32.3,<3.0.0" }, + { name = "typing-inspection", specifier = ">=0.4.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, + { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] 
+ +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = 
"typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url 
= "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, 
upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = 
"sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = 
"2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.408" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" }, +] + +[[package]] +name = "pytest" +version = 
"8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + 
+[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = 
"sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { 
url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20251115" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = 
"sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] diff --git a/packages/mistralai_azure/.genignore b/packages/mistralai_azure/.genignore deleted file mode 100644 index 
513646da..00000000 --- a/packages/mistralai_azure/.genignore +++ /dev/null @@ -1,4 +0,0 @@ -src/mistralai_azure/sdk.py -README.md -USAGE.md -docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore deleted file mode 100644 index 5a82b069..00000000 --- a/packages/mistralai_azure/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -.speakeasy/reports -README-PYPI.md -.venv/ -venv/ -src/*.egg-info/ -__pycache__/ -.pytest_cache/ -.python-version -.DS_Store -pyrightconfig.json diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock deleted file mode 100644 index b7d6f3ba..00000000 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ /dev/null @@ -1,185 +0,0 @@ -lockVersion: 2.0.0 -id: dc40fa48-2c4d-46ad-ac8b-270749770f34 -management: - docChecksum: 574e96caec9a63dbe3f39d646830f2c2 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 3a4d9b204b5731c461ed7279710d5ed6 - published: true -features: - python: - additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.5 - core: 5.12.3 - defaultEnabledRetries: 0.2.0 - enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.2 - examples: 3.0.1 - flatRequests: 1.0.1 - globalSecurity: 3.0.3 - globalSecurityCallbacks: 1.0.0 - globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 - methodArguments: 1.0.2 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.0 - responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.0.1 - serverEvents: 1.0.7 - serverEventsSentinels: 0.1.0 - serverIDs: 3.0.0 - unions: 3.0.4 -generatedFiles: - - .gitattributes - - .python-version - - .vscode/settings.json - - docs/models/arguments.md - - docs/models/assistantmessage.md - - docs/models/assistantmessagecontent.md - - docs/models/assistantmessagerole.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionchoicefinishreason.md - - 
docs/models/chatcompletionrequest.md - - docs/models/chatcompletionrequestmessages.md - - docs/models/chatcompletionrequeststop.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/completionchunk.md - - docs/models/completionevent.md - - docs/models/completionresponsestreamchoice.md - - docs/models/content.md - - docs/models/contentchunk.md - - docs/models/deltamessage.md - - docs/models/finishreason.md - - docs/models/function.md - - docs/models/functioncall.md - - docs/models/functionname.md - - docs/models/httpvalidationerror.md - - docs/models/imageurl.md - - docs/models/imageurlchunk.md - - docs/models/imageurlchunkimageurl.md - - docs/models/imageurlchunktype.md - - docs/models/jsonschema.md - - docs/models/loc.md - - docs/models/messages.md - - docs/models/prediction.md - - docs/models/referencechunk.md - - docs/models/referencechunktype.md - - docs/models/responseformat.md - - docs/models/responseformats.md - - docs/models/role.md - - docs/models/security.md - - docs/models/stop.md - - docs/models/systemmessage.md - - docs/models/systemmessagecontent.md - - docs/models/textchunk.md - - docs/models/tool.md - - docs/models/toolcall.md - - docs/models/toolchoice.md - - docs/models/toolchoiceenum.md - - docs/models/toolmessage.md - - docs/models/toolmessagecontent.md - - docs/models/toolmessagerole.md - - docs/models/tooltypes.md - - docs/models/type.md - - docs/models/usageinfo.md - - docs/models/usermessage.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/utils/retryconfig.md - - docs/models/validationerror.md - - poetry.toml - - py.typed - - pylintrc - - pyproject.toml - - scripts/prepare_readme.py - - scripts/publish.sh - - src/mistralai_azure/__init__.py - - src/mistralai_azure/_hooks/__init__.py - - src/mistralai_azure/_hooks/sdkhooks.py - - 
src/mistralai_azure/_hooks/types.py - - src/mistralai_azure/_version.py - - src/mistralai_azure/basesdk.py - - src/mistralai_azure/chat.py - - src/mistralai_azure/httpclient.py - - src/mistralai_azure/models/__init__.py - - src/mistralai_azure/models/assistantmessage.py - - src/mistralai_azure/models/chatcompletionchoice.py - - src/mistralai_azure/models/chatcompletionrequest.py - - src/mistralai_azure/models/chatcompletionresponse.py - - src/mistralai_azure/models/chatcompletionstreamrequest.py - - src/mistralai_azure/models/completionchunk.py - - src/mistralai_azure/models/completionevent.py - - src/mistralai_azure/models/completionresponsestreamchoice.py - - src/mistralai_azure/models/contentchunk.py - - src/mistralai_azure/models/deltamessage.py - - src/mistralai_azure/models/function.py - - src/mistralai_azure/models/functioncall.py - - src/mistralai_azure/models/functionname.py - - src/mistralai_azure/models/httpvalidationerror.py - - src/mistralai_azure/models/imageurl.py - - src/mistralai_azure/models/imageurlchunk.py - - src/mistralai_azure/models/jsonschema.py - - src/mistralai_azure/models/prediction.py - - src/mistralai_azure/models/referencechunk.py - - src/mistralai_azure/models/responseformat.py - - src/mistralai_azure/models/responseformats.py - - src/mistralai_azure/models/sdkerror.py - - src/mistralai_azure/models/security.py - - src/mistralai_azure/models/systemmessage.py - - src/mistralai_azure/models/textchunk.py - - src/mistralai_azure/models/tool.py - - src/mistralai_azure/models/toolcall.py - - src/mistralai_azure/models/toolchoice.py - - src/mistralai_azure/models/toolchoiceenum.py - - src/mistralai_azure/models/toolmessage.py - - src/mistralai_azure/models/tooltypes.py - - src/mistralai_azure/models/usageinfo.py - - src/mistralai_azure/models/usermessage.py - - src/mistralai_azure/models/validationerror.py - - src/mistralai_azure/py.typed - - src/mistralai_azure/sdk.py - - src/mistralai_azure/sdkconfiguration.py - - 
src/mistralai_azure/types/__init__.py - - src/mistralai_azure/types/basemodel.py - - src/mistralai_azure/utils/__init__.py - - src/mistralai_azure/utils/annotations.py - - src/mistralai_azure/utils/enums.py - - src/mistralai_azure/utils/eventstreaming.py - - src/mistralai_azure/utils/forms.py - - src/mistralai_azure/utils/headers.py - - src/mistralai_azure/utils/logger.py - - src/mistralai_azure/utils/metadata.py - - src/mistralai_azure/utils/queryparams.py - - src/mistralai_azure/utils/requestbodies.py - - src/mistralai_azure/utils/retries.py - - src/mistralai_azure/utils/security.py - - src/mistralai_azure/utils/serializers.py - - src/mistralai_azure/utils/url.py - - src/mistralai_azure/utils/values.py -examples: - stream_chat: - speakeasy-default-stream-chat: - requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} - responses: - "422": - application/json: {} - "200": {} - chat_completion_v1_chat_completions_post: - speakeasy-default-chat-completion-v1-chat-completions-post: - requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} - responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} - "422": - application/json: {} -examplesVersion: 1.0.0 -generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml deleted file mode 100644 index be4a1781..00000000 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ /dev/null @@ -1,50 +0,0 @@ -configVersion: 2.0.0 -generation: - sdkClassName: MistralAzure - maintainOpenAPIOrder: true - usageSnippets: - optionalPropertyRendering: withExample - useClassNamesForArrayFields: true - fixes: - nameResolutionDec2023: true - nameResolutionFeb2025: false - parameterOrderingFeb2024: true - requestResponseComponentNamesFeb2024: true - securityFeb2025: false - auth: - oAuth2ClientCredentialsEnabled: true - oAuth2PasswordEnabled: false -python: - version: 1.6.0 - additionalDependencies: - dev: - pytest: ^8.2.2 - pytest-asyncio: ^0.23.7 - authors: - - Mistral - clientServerStatusCodesAsErrors: true - defaultErrorName: SDKError - description: Python Client SDK for the Mistral AI API in Azure. 
- enableCustomCodeRegions: false - enumFormat: union - fixFlags: - responseRequiredSep2024: false - flattenGlobalSecurity: true - flattenRequests: true - flatteningOrder: parameters-first - imports: - option: openapi - paths: - callbacks: "" - errors: "" - operations: "" - shared: "" - webhooks: "" - inputModelSuffix: input - maxMethodParams: 15 - methodArguments: infer-optional-args - outputModelSuffix: output - packageName: mistralai_azure - pytestTimeout: 0 - responseFormat: flat - templateVersion: v2 diff --git a/packages/mistralai_azure/.vscode/settings.json b/packages/mistralai_azure/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/packages/mistralai_azure/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/packages/mistralai_azure/README.md b/packages/mistralai_azure/README.md deleted file mode 100644 index 65bc2e4f..00000000 --- a/packages/mistralai_azure/README.md +++ /dev/null @@ -1,430 +0,0 @@ -# Mistral on Azure Python Client - -## SDK Installation - -PIP -```bash -pip install mistralai -``` - -Poetry -```bash -poetry add mistralai -``` - -**Prerequisites** - -Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. -See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). - - -## SDK Example Usage - -### Create Chat Completions - -This example shows how to create chat completions. 
- -```python -# Synchronous Example -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.complete( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="azureai" -) - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_azure import MistralAzure -import os - -async def main(): - s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") - ) - res = await s.chat.complete_async( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="azureai" - ) - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - - - -## Available Resources and Operations - -### [chat](docs/sdks/chat/README.md) - -* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -* [create](docs/sdks/chat/README.md#create) - Chat Completion - - - -## Server-sent event streaming - -[Server-sent events][mdn-sse] are used to stream content from certain -operations. These operations will expose the stream as [Generator][generator] that -can be consumed using a simple `for` loop. The loop will -terminate when the server no longer has any events to send and closes the -underlying connection. - -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="azureai" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events -[generator]: https://wiki.python.org/moin/Generators - - - -## Retries - -Some of the endpoints in this SDK support retries. 
If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. - -To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: -```python -from mistralai_azure import MistralAzure -from mistralazure.utils import BackoffStrategy, RetryConfig -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai", - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: -```python -from mistralai_azure import MistralAzure -from mistralazure.utils import BackoffStrategy, RetryConfig -import os - -s = MistralAzure( - retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - - -## Error Handling - -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. 
- -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - -### Example - -```python -from mistralai_azure import MistralAzure, models -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - -res = None -try: - res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -except models.HTTPValidationError as e: - # handle exception - raise(e) -except models.SDKError as e: - # handle exception - raise(e) - -if res is not None: - # handle response - pass - -``` - - - -## Server Selection - -### Select Server by Name - -You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: - -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | - -#### Example - -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - server="prod", - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - -### Override Server URL Per-Client - -The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. 
For example: -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - server_url="https://api.mistral.ai", - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - - -## Custom HTTP Client - -The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. -Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. -This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
- -For example, you could specify a header for every request that this sdk makes as follows: -```python -from mistralai_azure import MistralAzure -import httpx - -http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = MistralAzure(client=http_client) -``` - -or you could wrap the client with your own custom logic: -```python -from mistralai_azure import MistralAzure -from mistralai_azure.httpclient import AsyncHttpClient -import httpx - -class CustomClient(AsyncHttpClient): - client: AsyncHttpClient - - def __init__(self, client: AsyncHttpClient): - self.client = client - - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - request.headers["Client-Level-Header"] = "added by client" - - return await self.client.send( - request, stream=stream, auth=auth, follow_redirects=follow_redirects - ) - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - return self.client.build_request( - method, - url, - content=content, - data=data, - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - extensions=extensions, - ) - -s = MistralAzure(async_client=CustomClient(httpx.AsyncClient())) 
-``` - - - -## Authentication - -### Per-Client Security Schemes - -This SDK supports the following security scheme globally: - -| Name | Type | Scheme | -| --------- | ---- | ----------- | -| `api_key` | http | HTTP Bearer | - -To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - - - -# Development - -## Contributions - -While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/packages/mistralai_azure/USAGE.md b/packages/mistralai_azure/USAGE.md deleted file mode 100644 index 0ccf3d70..00000000 --- a/packages/mistralai_azure/USAGE.md +++ /dev/null @@ -1,55 +0,0 @@ - -### Create Chat Completions - -This example shows how to create chat completions. - -```python -# Synchronous Example -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_azure import MistralAzure -import os - -async def main(): - s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="azureai") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md deleted file mode 100644 index 3d0bd90b..00000000 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ /dev/null @@ -1,11 +0,0 @@ -# AssistantMessage - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | 
N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagerole.md b/packages/mistralai_azure/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/packages/mistralai_azure/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md deleted file mode 100644 index a9a174fb..00000000 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ /dev/null @@ -1,24 +0,0 @@ -# ChatCompletionRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md b/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md deleted file mode 100644 index bc7708a6..00000000 --- a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md deleted file mode 100644 index 1646528d..00000000 --- a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md +++ /dev/null @@ -1,17 +0,0 @@ -# ChatCompletionRequestToolChoice - - -## Supported Types - -### `models.ToolChoice` - -```python -value: models.ToolChoice = /* values here */ -``` - -### `models.ToolChoiceEnum` - -```python -value: models.ToolChoiceEnum = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md b/packages/mistralai_azure/docs/models/chatcompletionresponse.md deleted file mode 100644 index ad376158..00000000 --- a/packages/mistralai_azure/docs/models/chatcompletionresponse.md +++ /dev/null @@ -1,13 +0,0 @@ -# ChatCompletionResponse - - -## Fields - -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | 
---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | -| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | -| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | -| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md deleted file mode 100644 index b3e06e7a..00000000 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ /dev/null @@ -1,24 +0,0 @@ -# ChatCompletionStreamRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md deleted file mode 100644 index cce0ca3e..00000000 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md +++ /dev/null @@ -1,17 +0,0 @@ -# ChatCompletionStreamRequestToolChoice - - -## Supported Types - -### `models.ToolChoice` - -```python -value: models.ToolChoice = /* values here */ -``` - -### `models.ToolChoiceEnum` - -```python -value: models.ToolChoiceEnum = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/mistralai_azure/docs/models/completionchunk.md deleted file mode 100644 index b8ae6a09..00000000 --- a/packages/mistralai_azure/docs/models/completionchunk.md +++ /dev/null @@ -1,13 +0,0 @@ -# CompletionChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md deleted file mode 
100644 index c807dacd..00000000 --- a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionResponseStreamChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | -| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | -| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/mistralai_azure/docs/models/content.md deleted file mode 100644 index a833dc2c..00000000 --- a/packages/mistralai_azure/docs/models/content.md +++ /dev/null @@ -1,17 +0,0 @@ -# Content - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[models.ContentChunk]` - -```python -value: List[models.ContentChunk] = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md deleted file mode 100644 index 61deabbf..00000000 --- a/packages/mistralai_azure/docs/models/deltamessage.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeltaMessage - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | 
List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/finishreason.md b/packages/mistralai_azure/docs/models/finishreason.md deleted file mode 100644 index 45a5aedb..00000000 --- a/packages/mistralai_azure/docs/models/finishreason.md +++ /dev/null @@ -1,11 +0,0 @@ -# FinishReason - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `STOP` | stop | -| `LENGTH` | length | -| `ERROR` | error | -| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md deleted file mode 100644 index a166b7bb..00000000 --- a/packages/mistralai_azure/docs/models/function.md +++ /dev/null @@ -1,11 +0,0 @@ -# Function - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurl.md b/packages/mistralai_azure/docs/models/imageurl.md deleted file mode 100644 index 7c2bcbc3..00000000 --- a/packages/mistralai_azure/docs/models/imageurl.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImageURL - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunk.md b/packages/mistralai_azure/docs/models/imageurlchunk.md deleted file mode 100644 index f1b926ef..00000000 --- 
a/packages/mistralai_azure/docs/models/imageurlchunk.md +++ /dev/null @@ -1,11 +0,0 @@ -# ImageURLChunk - -{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md b/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md deleted file mode 100644 index 76738908..00000000 --- a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md +++ /dev/null @@ -1,17 +0,0 @@ -# ImageURLChunkImageURL - - -## Supported Types - -### `models.ImageURL` - -```python -value: models.ImageURL = /* values here */ -``` - -### `str` - -```python -value: str = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/imageurlchunktype.md b/packages/mistralai_azure/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/packages/mistralai_azure/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md deleted file mode 100644 index ae387867..00000000 --- a/packages/mistralai_azure/docs/models/jsonschema.md +++ /dev/null @@ -1,11 +0,0 @@ -# JSONSchema - - -## Fields - -| Field | Type | Required | 
Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/messages.md b/packages/mistralai_azure/docs/models/messages.md deleted file mode 100644 index 1d394500..00000000 --- a/packages/mistralai_azure/docs/models/messages.md +++ /dev/null @@ -1,29 +0,0 @@ -# Messages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md deleted file mode 100644 index 86e9c396..00000000 --- a/packages/mistralai_azure/docs/models/prediction.md +++ /dev/null @@ -1,9 +0,0 @@ -# Prediction - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/referencechunk.md b/packages/mistralai_azure/docs/models/referencechunk.md deleted file mode 100644 index a132ca2f..00000000 --- a/packages/mistralai_azure/docs/models/referencechunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReferenceChunk - - -## Fields - -| 
Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/referencechunktype.md b/packages/mistralai_azure/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/packages/mistralai_azure/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md deleted file mode 100644 index 23a1641b..00000000 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ /dev/null @@ -1,9 +0,0 @@ -# ResponseFormat - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md deleted file mode 100644 index 06886afe..00000000 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ /dev/null @@ -1,12 +0,0 @@ -# ResponseFormats - -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- - -## Values - -| Name | Value | -| ------------- | ------------- | -| `TEXT` | text | -| `JSON_OBJECT` | json_object | -| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/role.md b/packages/mistralai_azure/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/packages/mistralai_azure/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/stop.md b/packages/mistralai_azure/docs/models/stop.md deleted file mode 100644 index ba40ca83..00000000 --- a/packages/mistralai_azure/docs/models/stop.md +++ /dev/null @@ -1,19 +0,0 @@ -# Stop - -Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[str]` - -```python -value: List[str] = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/mistralai_azure/docs/models/systemmessage.md deleted file mode 100644 index 0dba71c0..00000000 --- a/packages/mistralai_azure/docs/models/systemmessage.md +++ /dev/null @@ -1,9 +0,0 @@ -# SystemMessage - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/systemmessagecontent.md 
b/packages/mistralai_azure/docs/models/systemmessagecontent.md deleted file mode 100644 index e0d27d9f..00000000 --- a/packages/mistralai_azure/docs/models/systemmessagecontent.md +++ /dev/null @@ -1,17 +0,0 @@ -# SystemMessageContent - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[models.TextChunk]` - -```python -value: List[models.TextChunk] = /* values here */ -``` - diff --git a/packages/mistralai_azure/docs/models/textchunk.md b/packages/mistralai_azure/docs/models/textchunk.md deleted file mode 100644 index 6daab3c3..00000000 --- a/packages/mistralai_azure/docs/models/textchunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# TextChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md deleted file mode 100644 index 822f86f8..00000000 --- a/packages/mistralai_azure/docs/models/tool.md +++ /dev/null @@ -1,9 +0,0 @@ -# Tool - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md deleted file mode 100644 index 574be1ea..00000000 --- 
a/packages/mistralai_azure/docs/models/toolcall.md +++ /dev/null @@ -1,11 +0,0 @@ -# ToolCall - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/mistralai_azure/docs/models/toolchoice.md deleted file mode 100644 index 792ebcd6..00000000 --- a/packages/mistralai_azure/docs/models/toolchoice.md +++ /dev/null @@ -1,11 +0,0 @@ -# ToolChoice - -ToolChoice is either a ToolChoiceEnum or a ToolChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessage.md b/packages/mistralai_azure/docs/models/toolmessage.md deleted file mode 100644 index a54f4933..00000000 --- a/packages/mistralai_azure/docs/models/toolmessage.md +++ /dev/null @@ -1,11 +0,0 @@ -# ToolMessage - - -## Fields - -| Field 
| Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | -| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagerole.md b/packages/mistralai_azure/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/packages/mistralai_azure/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/type.md b/packages/mistralai_azure/docs/models/type.md deleted file mode 100644 index eb0581e7..00000000 --- a/packages/mistralai_azure/docs/models/type.md +++ /dev/null @@ -1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/mistralai_azure/docs/models/usageinfo.md deleted file mode 100644 index 9f56a3ae..00000000 --- a/packages/mistralai_azure/docs/models/usageinfo.md +++ /dev/null @@ -1,10 +0,0 @@ -# UsageInfo - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | 
:heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/mistralai_azure/docs/models/usermessage.md deleted file mode 100644 index 63b01310..00000000 --- a/packages/mistralai_azure/docs/models/usermessage.md +++ /dev/null @@ -1,9 +0,0 @@ -# UserMessage - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagerole.md b/packages/mistralai_azure/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/packages/mistralai_azure/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/sdks/chat/README.md b/packages/mistralai_azure/docs/sdks/chat/README.md deleted file mode 100644 index 26d20bb4..00000000 --- a/packages/mistralai_azure/docs/sdks/chat/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# Chat -(*chat*) - -## Overview - -Chat Completion API. - -### Available Operations - -* [stream](#stream) - Stream chat completion -* [create](#create) - Chat Completion - -## stream - -Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - -### Example Usage - -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -### Parameters - -| Parameter | Type | Required | Description | Example | -| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | -| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - - -### Response - -**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** -### Errors - -| Error Object | Status Code | Content Type | -| --------------- | ----------- | ------------ | -| models.SDKError | 4xx-5xx | */* | - -## create - -Chat Completion - -### Example Usage - -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - # handle response - pass - -``` - -### Parameters - -| Parameter | Type | Required | Description | Example | -| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - - -### Response - -**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** -### Errors - -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock deleted file mode 100644 index 8b70ddcc..00000000 --- a/packages/mistralai_azure/poetry.lock +++ /dev/null @@ -1,665 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
- -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[[package]] -name = "anyio" -version = "4.4.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] -trio = ["trio (>=0.23)"] - -[[package]] -name = "astroid" -version = "3.2.4" -description = "An abstract syntax tree for Python with inference support." 
-optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "certifi" -version = "2024.7.4" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "dill" -version = "0.3.8" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "eval-type-backport" -version = "0.2.0" -description = "Like 
`typing._eval_type`, but lets older Python versions use newer typing features." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, - {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "1.0.5" -description = "A minimal low-level HTTP client." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] - -[[package]] -name = "httpx" -version = "0.28.1" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" - -[package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "idna" -version = "3.7" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -groups = ["main"] -files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", 
hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mypy" -version = "1.14.1" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = 
"mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = 
"mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, -] - -[package.dependencies] -mypy_extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -faster-cache = ["orjson"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." 
-optional = false -python-versions = ">=3.5" -groups = ["dev"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "packaging" -version = "24.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, -] - -[[package]] -name = "platformdirs" -version = "4.2.2" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, -] - -[package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] 
-dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pydantic" -version = "2.10.6" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, -] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" -typing-extensions = ">=4.12.2" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] - -[[package]] -name = "pydantic-core" -version = "2.27.2" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = 
"pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = 
"pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = 
"pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", 
hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = 
"sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pylint" -version = "3.2.3" -description = "python code static checker" -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, -] - -[package.dependencies] -astroid = ">=3.2.2,<=3.3.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ 
- {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, -] -isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pytest" -version = "8.3.2" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.23.8" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, -] - -[package.dependencies] -pytest = ">=7.0.0,<9" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] 
-testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.0" -description = 
"Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, -] - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20240316" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, - {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -] - -[package.dependencies] -typing-extensions = ">=4.12.0" - -[metadata] -lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "16a8e7bb56287babdd384870773880315911c8f1851d21314cf11ca92104c600" diff 
--git a/packages/mistralai_azure/poetry.toml b/packages/mistralai_azure/poetry.toml deleted file mode 100644 index ab1033bd..00000000 --- a/packages/mistralai_azure/poetry.toml +++ /dev/null @@ -1,2 +0,0 @@ -[virtualenvs] -in-project = true diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc deleted file mode 100644 index 266bc815..00000000 --- a/packages/mistralai_azure/pylintrc +++ /dev/null @@ -1,662 +0,0 @@ -[MAIN] - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Clear in-memory caches upon conclusion of linting. Useful if running pylint -# in a server-like mode. -clear-cache-post-run=no - -# Load and enable all available extensions. Use --list-extensions to see a list -# all available extensions. -#enable-all-extensions= - -# In error mode, messages with a category besides ERROR or FATAL are -# suppressed, and no reports are done by default. Error mode is compatible with -# disabling specific errors. -#errors-only= - -# Always return a 0 (non-error) status code, even if lint errors are found. -# This is primarily useful in continuous integration scripts. -#exit-zero= - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. -extension-pkg-allow-list= - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. (This is an alternative name to extension-pkg-allow-list -# for backward compatibility.) -extension-pkg-whitelist= - -# Return non-zero exit code if any of these messages/categories are detected, -# even if score is above --fail-under value. 
Syntax same as enable. Messages -# specified are enabled, while categories only check already-enabled messages. -fail-on= - -# Specify a score threshold under which the program will exit with error. -fail-under=10 - -# Interpret the stdin as a python script, whose filename needs to be passed as -# the module_or_package argument. -#from-stdin= - -# Files or directories to be skipped. They should be base names, not paths. -ignore=CVS - -# Add files or directories matching the regular expressions patterns to the -# ignore-list. The regex matches against paths and can be in Posix or Windows -# format. Because '\\' represents the directory delimiter on Windows systems, -# it can't be used as an escape character. -ignore-paths= - -# Files or directories matching the regular expression patterns are skipped. -# The regex matches against base names, not paths. The default value ignores -# Emacs file locks -ignore-patterns=^\.# - -# List of module names for which member attributes should not be checked and -# will not be imported (useful for modules/projects where namespaces are -# manipulated during runtime and thus existing member attributes cannot be -# deduced by static analysis). It supports qualified module names, as well as -# Unix pattern matching. -ignored-modules= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the -# number of processors available to use, and will cap the count on Windows to -# avoid hangs. -jobs=1 - -# Control the amount of potential inferred values when inferring a single -# object. This can help the performance when dealing with large functions or -# complex, nested conditions. -limit-inference-results=100 - -# List of plugins (as comma separated values of python module names) to load, -# usually to register additional checkers. -load-plugins= - -# Pickle collected data for later comparisons. 
-persistent=yes - -# Minimum Python version to use for version dependent checks. Will default to -# the version used to run pylint. -py-version=3.9 - -# Discover python modules and packages in the file system subtree. -recursive=no - -# Add paths to the list of the source roots. Supports globbing patterns. The -# source root is an absolute path or a path relative to the current working -# directory used to determine a package namespace for modules located under the -# source root. -source-roots=src - -# When enabled, pylint would attempt to guess common misconfiguration and emit -# user-friendly hints instead of false-positive error messages. -suggestion-mode=yes - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# In verbose mode, extra non-checker-related info will be displayed. -#verbose= - - -[BASIC] - -# Naming style matching correct argument names. -argument-naming-style=snake_case - -# Regular expression matching correct argument names. Overrides argument- -# naming-style. If left empty, argument names will be checked with the set -# naming style. -#argument-rgx= - -# Naming style matching correct attribute names. -#attr-naming-style=snake_case - -# Regular expression matching correct attribute names. Overrides attr-naming- -# style. If left empty, attribute names will be checked with the set naming -# style. -attr-rgx=[^\W\d][^\W]*|__.*__$ - -# Bad variable names which should always be refused, separated by a comma. -bad-names= - -# Bad variable names regexes, separated by a comma. If names match any regex, -# they will always be refused -bad-names-rgxs= - -# Naming style matching correct class attribute names. -class-attribute-naming-style=any - -# Regular expression matching correct class attribute names. Overrides class- -# attribute-naming-style. If left empty, class attribute names will be checked -# with the set naming style. 
-#class-attribute-rgx= - -# Naming style matching correct class constant names. -class-const-naming-style=UPPER_CASE - -# Regular expression matching correct class constant names. Overrides class- -# const-naming-style. If left empty, class constant names will be checked with -# the set naming style. -#class-const-rgx= - -# Naming style matching correct class names. -class-naming-style=PascalCase - -# Regular expression matching correct class names. Overrides class-naming- -# style. If left empty, class names will be checked with the set naming style. -#class-rgx= - -# Naming style matching correct constant names. -const-naming-style=UPPER_CASE - -# Regular expression matching correct constant names. Overrides const-naming- -# style. If left empty, constant names will be checked with the set naming -# style. -#const-rgx= - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Naming style matching correct function names. -function-naming-style=snake_case - -# Regular expression matching correct function names. Overrides function- -# naming-style. If left empty, function names will be checked with the set -# naming style. -#function-rgx= - -# Good variable names which should always be accepted, separated by a comma. -good-names=i, - j, - k, - ex, - Run, - _, - e, - n, - id - -# Good variable names regexes, separated by a comma. If names match any regex, -# they will always be accepted -good-names-rgxs= - -# Include a hint for the correct naming format with invalid-name. -include-naming-hint=no - -# Naming style matching correct inline iteration names. -inlinevar-naming-style=any - -# Regular expression matching correct inline iteration names. Overrides -# inlinevar-naming-style. If left empty, inline iteration names will be checked -# with the set naming style. -#inlinevar-rgx= - -# Naming style matching correct method names. 
-method-naming-style=snake_case - -# Regular expression matching correct method names. Overrides method-naming- -# style. If left empty, method names will be checked with the set naming style. -#method-rgx= - -# Naming style matching correct module names. -module-naming-style=snake_case - -# Regular expression matching correct module names. Overrides module-naming- -# style. If left empty, module names will be checked with the set naming style. -#module-rgx= - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -# These decorators are taken in consideration only for invalid-name. -property-classes=abc.abstractproperty - -# Regular expression matching correct type alias names. If left empty, type -# alias names will be checked with the set naming style. -typealias-rgx=.* - -# Regular expression matching correct type variable names. If left empty, type -# variable names will be checked with the set naming style. -#typevar-rgx= - -# Naming style matching correct variable names. -variable-naming-style=snake_case - -# Regular expression matching correct variable names. Overrides variable- -# naming-style. If left empty, variable names will be checked with the set -# naming style. -#variable-rgx= - - -[CLASSES] - -# Warn about protected attribute access inside special methods -check-protected-access-in-special-methods=no - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__, - __new__, - setUp, - asyncSetUp, - __post_init__ - -# List of member names, which should be excluded from the protected access -# warning. 
-exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[DESIGN] - -# List of regular expressions of class ancestor names to ignore when counting -# public methods (see R0903) -exclude-too-few-public-methods= - -# List of qualified class names to ignore when counting class parents (see -# R0901) -ignored-parents= - -# Maximum number of arguments for function / method. -max-args=5 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Maximum number of boolean expressions in an if statement (see R0916). -max-bool-expr=5 - -# Maximum number of branch for function / method body. -max-branches=12 - -# Maximum number of locals for function / method body. -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=25 - -# Maximum number of return / yield for function / method body. -max-returns=6 - -# Maximum number of statements in function / method body. -max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when caught. -overgeneral-exceptions=builtins.BaseException,builtins.Exception - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. 
-max-line-length=100 - -# Maximum number of lines in a module. -max-module-lines=1000 - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - - -[IMPORTS] - -# List of modules that can be imported at any level, not just the top level -# one. -allow-any-import-level= - -# Allow explicit reexports by alias from a package __init__. -allow-reexport-from-package=no - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Deprecated modules which should not be used, separated by a comma. -deprecated-modules= - -# Output a graph (.gv or any supported image format) of external dependencies -# to the given file (report RP0402 must not be disabled). -ext-import-graph= - -# Output a graph (.gv or any supported image format) of all (i.e. internal and -# external) dependencies to the given file (report RP0402 must not be -# disabled). -import-graph= - -# Output a graph (.gv or any supported image format) of internal dependencies -# to the given file (report RP0402 must not be disabled). -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Couples of modules and preferred modules, separated by a comma. -preferred-modules= - - -[LOGGING] - -# The type of string formatting that logging methods do. `old` means using % -# formatting, `new` is for `{}` formatting. -logging-format-style=old - -# Logging modules to check that the string format arguments are in logging -# function parameter format. -logging-modules=logging - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. 
Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, -# UNDEFINED. -confidence=HIGH, - CONTROL_FLOW, - INFERENCE, - INFERENCE_FAILURE, - UNDEFINED - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then re-enable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use "--disable=all --enable=classes -# --disable=W". -disable=raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, - suppressed-message, - useless-suppression, - deprecated-pragma, - use-implicit-booleaness-not-comparison-to-string, - use-implicit-booleaness-not-comparison-to-zero, - use-symbolic-message-instead, - trailing-whitespace, - line-too-long, - missing-class-docstring, - missing-module-docstring, - missing-function-docstring, - too-many-instance-attributes, - wrong-import-order, - too-many-arguments, - broad-exception-raised, - too-few-public-methods, - too-many-branches, - duplicate-code, - trailing-newlines, - too-many-public-methods, - too-many-locals, - too-many-lines, - using-constant-test, - too-many-statements, - cyclic-import, - too-many-nested-blocks, - too-many-boolean-expressions, - no-else-raise, - bare-except, - broad-exception-caught, - fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import - -# Enable the message, report, category or checker with the given id(s). 
You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable= - - -[METHOD_ARGS] - -# List of qualified names (i.e., library.method) which require a timeout -# parameter e.g. 'requests.api.get,requests.api.post' -timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME, - XXX, - TODO - -# Regular expression of note tags to take in consideration. -notes-rgx= - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - -# Complete name of functions that never returns. When checking for -# inconsistent-return-statements if a never returning function is called then -# it will be considered as an explicit return statement and no message will be -# printed. -never-returning-functions=sys.exit,argparse.parse_error - - -[REPORTS] - -# Python expression which should return a score less than or equal to 10. You -# have access to the variables 'fatal', 'error', 'warning', 'refactor', -# 'convention', and 'info' which contain the number of messages in each -# category, as well as 'statement' which is the total number of statements -# analyzed. This score is used by the global evaluation report (RP0004). -evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details. -msg-template= - -# Set the output format. 
Available formats are: text, parseable, colorized, -# json2 (improved json format), json (old json format) and msvs (visual -# studio). You can also give a reporter class, e.g. -# mypackage.mymodule.MyReporterClass. -#output-format= - -# Tells whether to display a full report or only the messages. -reports=no - -# Activate the evaluation score. -score=yes - - -[SIMILARITIES] - -# Comments are removed from the similarity computation -ignore-comments=yes - -# Docstrings are removed from the similarity computation -ignore-docstrings=yes - -# Imports are removed from the similarity computation -ignore-imports=yes - -# Signatures are removed from the similarity computation -ignore-signatures=yes - -# Minimum lines number of a similarity. -min-similarity-lines=4 - - -[SPELLING] - -# Limits count of emitted suggestions for spelling mistakes. -max-spelling-suggestions=4 - -# Spelling dictionary name. No available dictionaries : You need to install -# both the python package and the system dependency for enchant to work. -spelling-dict= - -# List of comma separated words that should be considered directives if they -# appear at the beginning of a comment and should not be checked. -spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains the private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to the private dictionary (see the -# --spelling-private-dict-file option) instead of raising a message. -spelling-store-unknown-words=no - - -[STRING] - -# This flag controls whether inconsistent-quotes generates a warning when the -# character used as a quote delimiter is used inconsistently within a module. 
-check-quote-consistency=no - -# This flag controls whether the implicit-str-concat should generate a warning -# on implicit string concatenation in sequences defined over several lines. -check-str-concat-over-line-jumps=no - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - -# Tells whether to warn about missing members when the owner of the attribute -# is inferred to be None. -ignore-none=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of symbolic message names to ignore for Mixin members. -ignored-checks-for-mixins=no-member, - not-async-context-manager, - not-context-manager, - attribute-defined-outside-init - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace - -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. 
-missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - -# Regex pattern to define which classes are considered mixins. -mixin-class-rgx=.*[Mm]ixin - -# List of decorators that change the signature of a decorated function. -signature-mutators= - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid defining new builtins when possible. -additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of names allowed to shadow builtins -allowed-redefined-builtins=id,object - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_, - _cb - -# A regular expression matching the name of dummy variables (i.e. expected to -# not be used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ - -# Argument names that match this expression will be ignored. -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml deleted file mode 100644 index cca906a7..00000000 --- a/packages/mistralai_azure/pyproject.toml +++ /dev/null @@ -1,58 +0,0 @@ -[project] -name = "mistralai_azure" -version = "1.6.0" -description = "Python Client SDK for the Mistral AI API in Azure." 
-authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "typing-inspection >=0.4.0", -] - -[tool.poetry] -packages = [ - { include = "mistralai_azure", from = "src" } -] -include = ["py.typed", "src/mistralai_azure/py.typed"] - -[tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_azure/py.typed"] - -[virtualenvs] -in-project = true - -[tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - -[tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" -pythonpath = ["src"] - -[tool.mypy] -disable_error_code = "misc" - -[[tool.mypy.overrides]] -module = "typing_inspect" -ignore_missing_imports = true - -[[tool.mypy.overrides]] -module = "jsonpath" -ignore_missing_imports = true - -[tool.pyright] -venvPath = "." -venv = ".venv" - - diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/mistralai_azure/scripts/prepare_readme.py deleted file mode 100644 index 825d9ded..00000000 --- a/packages/mistralai_azure/scripts/prepare_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import shutil - -try: - shutil.copyfile("README.md", "README-PYPI.md") -except Exception as e: - print("Failed to copy README.md to README-PYPI.md") - print(e) diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh deleted file mode 100755 index f2f2cf2c..00000000 --- a/packages/mistralai_azure/scripts/publish.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} - -poetry run python scripts/prepare_readme.py - -poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py deleted file mode 100644 index dd02e42e..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) -from .sdk import * -from .sdkconfiguration import * -from .models import * - - -VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py deleted file mode 100644 index 77df6aef..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py +++ /dev/null @@ -1,22 +0,0 @@ -# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py -from typing import Union - -import httpx - -from .types import BeforeRequestContext, BeforeRequestHook - -PREFIX = "mistral-client-python/" - -class CustomUserAgentHook(BeforeRequestHook): - def before_request( - self, hook_ctx: BeforeRequestContext, request: 
httpx.Request - ) -> Union[httpx.Request, Exception]: - current = request.headers["user-agent"] - if current.startswith(PREFIX): - return request - - request.headers["user-agent"] = ( - PREFIX + current.split(" ")[1] - ) - - return request diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py deleted file mode 100644 index 304edfa2..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py +++ /dev/null @@ -1,15 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. - - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py deleted file mode 100644 index 37ff4e9f..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -from .types import ( - SDKInitHook, - BeforeRequestContext, - BeforeRequestHook, - AfterSuccessContext, - AfterSuccessHook, - AfterErrorContext, - AfterErrorHook, - Hooks, -) -from .registration import init_hooks -from typing import List, Optional, Tuple -from mistralai_azure.httpclient import HttpClient - - -class SDKHooks(Hooks): - def __init__(self) -> None: - self.sdk_init_hooks: List[SDKInitHook] = [] - self.before_request_hooks: List[BeforeRequestHook] = [] - self.after_success_hooks: List[AfterSuccessHook] = [] - self.after_error_hooks: List[AfterErrorHook] = [] - init_hooks(self) - - def register_sdk_init_hook(self, hook: SDKInitHook) -> None: - self.sdk_init_hooks.append(hook) - - def register_before_request_hook(self, hook: BeforeRequestHook) -> None: - self.before_request_hooks.append(hook) - - def register_after_success_hook(self, hook: AfterSuccessHook) -> None: - self.after_success_hooks.append(hook) - - def register_after_error_hook(self, hook: AfterErrorHook) -> None: - self.after_error_hooks.append(hook) - - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - for hook in self.sdk_init_hooks: - base_url, client = hook.sdk_init(base_url, client) - return base_url, client - - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> httpx.Request: - for hook in self.before_request_hooks: - out = hook.before_request(hook_ctx, request) - if isinstance(out, Exception): - raise out - request = out - - return request - - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> httpx.Response: - for hook in self.after_success_hooks: - out = hook.after_success(hook_ctx, response) - if isinstance(out, Exception): - raise out - response = out - return response - - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Tuple[Optional[httpx.Response], 
Optional[Exception]]: - for hook in self.after_error_hooks: - result = hook.after_error(hook_ctx, response, error) - if isinstance(result, Exception): - raise result - response, error = result - return response, error diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py deleted file mode 100644 index 297dfa2f..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from abc import ABC, abstractmethod -import httpx -from mistralai_azure.httpclient import HttpClient -from typing import Any, Callable, List, Optional, Tuple, Union - - -class HookContext: - base_url: str - operation_id: str - oauth2_scopes: Optional[List[str]] = None - security_source: Optional[Union[Any, Callable[[], Any]]] = None - - def __init__( - self, - base_url: str, - operation_id: str, - oauth2_scopes: Optional[List[str]], - security_source: Optional[Union[Any, Callable[[], Any]]], - ): - self.base_url = base_url - self.operation_id = operation_id - self.oauth2_scopes = oauth2_scopes - self.security_source = security_source - - -class BeforeRequestContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterSuccessContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterErrorContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class SDKInitHook(ABC): - @abstractmethod - def sdk_init(self, base_url: str, client: 
HttpClient) -> Tuple[str, HttpClient]: - pass - - -class BeforeRequestHook(ABC): - @abstractmethod - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - pass - - -class AfterSuccessHook(ABC): - @abstractmethod - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> Union[httpx.Response, Exception]: - pass - - -class AfterErrorHook(ABC): - @abstractmethod - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: - pass - - -class Hooks(ABC): - @abstractmethod - def register_sdk_init_hook(self, hook: SDKInitHook): - pass - - @abstractmethod - def register_before_request_hook(self, hook: BeforeRequestHook): - pass - - @abstractmethod - def register_after_success_hook(self, hook: AfterSuccessHook): - pass - - @abstractmethod - def register_after_error_hook(self, hook: AfterErrorHook): - pass diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py deleted file mode 100644 index 65696610..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import importlib.metadata - -__title__: str = "mistralai_azure" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai_azure" - -try: - if __package__ is not None: - __version__ = importlib.metadata.version(__package__) -except importlib.metadata.PackageNotFoundError: - pass diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py deleted file mode 100644 index 24e4935e..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ /dev/null @@ -1,362 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .sdkconfiguration import SDKConfiguration -import httpx -from mistralai_azure import models, utils -from mistralai_azure._hooks import ( - AfterErrorContext, - AfterSuccessContext, - BeforeRequestContext, -) -from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Mapping, Optional, Tuple -from urllib.parse import parse_qs, urlparse - - -class BaseSDK: - sdk_configuration: SDKConfiguration - - def __init__(self, sdk_config: SDKConfiguration) -> None: - self.sdk_configuration = sdk_config - - def _get_url(self, base_url, url_variables): - sdk_url, sdk_variables = self.sdk_configuration.get_server_details() - - if base_url is None: - base_url = sdk_url - - if url_variables is None: - url_variables = sdk_variables - - return utils.template_url(base_url, url_variables) - - def _build_request_async( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], 
Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.async_client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - ) - - def _build_request( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - ) - - def _build_request_with_client( - self, - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - query_params = {} - - url = url_override - if url is None: - url = utils.generate_url( - 
self._get_url(base_url, url_variables), - path, - request if request_has_path_params else None, - _globals if request_has_path_params else None, - ) - - query_params = utils.get_query_params( - request if request_has_query_params else None, - _globals if request_has_query_params else None, - ) - else: - # Pick up the query parameter from the override so they can be - # preserved when building the request later on (necessary as of - # httpx 0.28). - parsed_override = urlparse(str(url_override)) - query_params = parse_qs(parsed_override.query, keep_blank_values=True) - - headers = utils.get_headers(request, _globals) - headers["Accept"] = accept_header_value - headers[user_agent_header] = self.sdk_configuration.user_agent - - if security is not None: - if callable(security): - security = security() - - if security is not None: - security_headers, security_query_params = utils.get_security(security) - headers = {**headers, **security_headers} - query_params = {**query_params, **security_query_params} - - serialized_request_body = SerializedRequestBody() - if get_serialized_body is not None: - rb = get_serialized_body() - if request_body_required and rb is None: - raise ValueError("request body is required") - - if rb is not None: - serialized_request_body = rb - - if ( - serialized_request_body.media_type is not None - and serialized_request_body.media_type - not in ( - "multipart/form-data", - "multipart/mixed", - ) - ): - headers["content-type"] = serialized_request_body.media_type - - if http_headers is not None: - for header, value in http_headers.items(): - headers[header] = value - - timeout = timeout_ms / 1000 if timeout_ms is not None else None - - return client.build_request( - method, - url, - params=query_params, - content=serialized_request_body.content, - data=serialized_request_body.data, - files=serialized_request_body.files, - headers=headers, - timeout=timeout, - ) - - def do_request( - self, - hook_ctx, - request, - error_status_codes, - 
stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.client - logger = self.sdk_configuration.debug_logger - - def do(): - http_res = None - try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = client.send(req, stream=stream) - except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") - - return http_res - - if retry_config is not None: - http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) - else: - http_res = do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) - - return http_res - - async def do_request_async( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - 
retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.async_client - logger = self.sdk_configuration.debug_logger - - async def do(): - http_res = None - try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = await client.send(req, stream=stream) - except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") - - return http_res - - if retry_config is not None: - http_res = await utils.retry_async( - do, utils.Retries(retry_config[0], retry_config[1]) - ) - else: - http_res = await do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) - - return http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py 
b/packages/mistralai_azure/src/mistralai_azure/chat.py deleted file mode 100644 index cf3511fd..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ /dev/null @@ -1,704 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai_azure import models, utils -from mistralai_azure._hooks import HookContext -from mistralai_azure.types import OptionalNullable, UNSET -from mistralai_azure.utils import eventstreaming -from typing import Any, List, Mapping, Optional, Union - - -class Chat(BaseSDK): - r"""Chat Completion API.""" - - def stream( - self, - *, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: Optional[str] = "azureai", - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: - r"""Stream chat completion 
- - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - 
safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def stream_async( - self, - *, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: Optional[str] = "azureai", - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - 
request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def complete( - self, - *, - messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], - ], - model: Optional[str] = "azureai", - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.ChatCompletionRequestStop, - models.ChatCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: - r"""Chat Completion - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param safe_prompt: Whether to inject a safety prompt before all conversations. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - 
retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def complete_async( - self, - *, - messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], - ], - model: Optional[str] = "azureai", - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.ChatCompletionRequestStop, - 
models.ChatCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: - r"""Chat Completion - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - base_url=base_url or "", - 
operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py deleted file mode 100644 index 1e426352..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -# pyright: reportReturnType = false -import asyncio -from concurrent.futures import ThreadPoolExecutor -from typing_extensions import Protocol, runtime_checkable -import httpx -from typing import Any, Optional, Union - - -@runtime_checkable -class HttpClient(Protocol): - def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - def close(self) -> None: - pass - - -@runtime_checkable -class AsyncHttpClient(Protocol): - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: 
Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - async def aclose(self) -> None: - pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - is_async = False - try: - asyncio.get_running_loop() - is_async = True - except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. 
- if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: - asyncio.run(async_client.aclose()) - except Exception: - pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py deleted file mode 100644 index 2229c469..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ /dev/null @@ -1,210 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from 
.deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) - - -__all__ = [ - "Arguments", - "ArgumentsTypedDict", - "AssistantMessage", - "AssistantMessageContent", - 
"AssistantMessageContentTypedDict", - "AssistantMessageRole", - "AssistantMessageTypedDict", - "ChatCompletionChoice", - "ChatCompletionChoiceFinishReason", - "ChatCompletionChoiceTypedDict", - "ChatCompletionRequest", - "ChatCompletionRequestMessages", - "ChatCompletionRequestMessagesTypedDict", - "ChatCompletionRequestStop", - "ChatCompletionRequestStopTypedDict", - "ChatCompletionRequestToolChoice", - "ChatCompletionRequestToolChoiceTypedDict", - "ChatCompletionRequestTypedDict", - "ChatCompletionResponse", - "ChatCompletionResponseTypedDict", - "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestToolChoice", - "ChatCompletionStreamRequestToolChoiceTypedDict", - "ChatCompletionStreamRequestTypedDict", - "CompletionChunk", - "CompletionChunkTypedDict", - "CompletionEvent", - "CompletionEventTypedDict", - "CompletionResponseStreamChoice", - "CompletionResponseStreamChoiceTypedDict", - "Content", - "ContentChunk", - "ContentChunkTypedDict", - "ContentTypedDict", - "DeltaMessage", - "DeltaMessageTypedDict", - "FinishReason", - "Function", - "FunctionCall", - "FunctionCallTypedDict", - "FunctionName", - "FunctionNameTypedDict", - "FunctionTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", - "ImageURL", - "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", - "ImageURLChunkTypedDict", - "ImageURLTypedDict", - "JSONSchema", - "JSONSchemaTypedDict", - "Loc", - "LocTypedDict", - "Messages", - "MessagesTypedDict", - "Prediction", - "PredictionTypedDict", - "ReferenceChunk", - "ReferenceChunkType", - "ReferenceChunkTypedDict", - "ResponseFormat", - "ResponseFormatTypedDict", - "ResponseFormats", - "Role", - "SDKError", - "Security", - "SecurityTypedDict", - "Stop", - "StopTypedDict", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentTypedDict", - "SystemMessageTypedDict", - "TextChunk", - "TextChunkTypedDict", - "Tool", - "ToolCall", - "ToolCallTypedDict", - "ToolChoice", - 
"ToolChoiceEnum", - "ToolChoiceTypedDict", - "ToolMessage", - "ToolMessageContent", - "ToolMessageContentTypedDict", - "ToolMessageRole", - "ToolMessageTypedDict", - "ToolTypedDict", - "ToolTypes", - "Type", - "UsageInfo", - "UsageInfoTypedDict", - "UserMessage", - "UserMessageContent", - "UserMessageContentTypedDict", - "UserMessageRole", - "UserMessageTypedDict", - "ValidationError", - "ValidationErrorTypedDict", -] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py deleted file mode 100644 index 530b33df..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AssistantMessageContentTypedDict = TypeAliasType( - "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -AssistantMessageContent = TypeAliasType( - "AssistantMessageContent", Union[str, List[ContentChunk]] -) - - -AssistantMessageRole = Literal["assistant"] - - -class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[AssistantMessageContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] - - -class AssistantMessage(BaseModel): - content: OptionalNullable[AssistantMessageContent] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - - role: Optional[AssistantMessageRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py deleted file mode 100644 index a78b72d5..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_azure.types import BaseModel, UnrecognizedStr -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Literal, Union -from typing_extensions import Annotated, TypedDict - - -ChatCompletionChoiceFinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr -] - - -class ChatCompletionChoiceTypedDict(TypedDict): - index: int - message: AssistantMessageTypedDict - finish_reason: ChatCompletionChoiceFinishReason - - -class ChatCompletionChoice(BaseModel): - index: int - - message: AssistantMessage - - finish_reason: Annotated[ - ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) - ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py deleted file mode 100644 index f48c1f50..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ /dev/null @@ -1,195 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from mistralai_azure.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ChatCompletionRequestStopTypedDict = TypeAliasType( - "ChatCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestStop = TypeAliasType( - "ChatCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -ChatCompletionRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -ChatCompletionRequestToolChoice = TypeAliasType( - "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class ChatCompletionRequestTypedDict(TypedDict): - messages: List[ChatCompletionRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[str] - r"""The ID of the model to use for this request.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[ChatCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionRequest(BaseModel): - messages: List[ChatCompletionRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - model: Optional[str] = "azureai" - r"""The ID of the model to use for this request.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[ChatCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[ChatCompletionRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "model", - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "safe_prompt", - ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py deleted file mode 100644 index ecd85d5c..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_azure.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class ChatCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - - -class ChatCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: Optional[int] = None - - choices: Optional[List[ChatCompletionChoice]] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py deleted file mode 100644 index 50cf1f01..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ /dev/null @@ -1,189 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from mistralai_azure.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = TypeAliasType("Stop", Union[str, List[str]]) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Messages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -ChatCompletionStreamRequestToolChoice = TypeAliasType( - "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class ChatCompletionStreamRequestTypedDict(TypedDict): - messages: List[MessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[str] - r"""The ID of the model to use for this request.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[StopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionStreamRequest(BaseModel): - messages: List[Messages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - model: Optional[str] = "azureai" - r"""The ID of the model to use for this request.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[Stop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "model", - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "safe_prompt", - ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py deleted file mode 100644 index d6cc2a86..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_azure.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionChunkTypedDict(TypedDict): - id: str - model: str - choices: List[CompletionResponseStreamChoiceTypedDict] - object: NotRequired[str] - created: NotRequired[int] - usage: NotRequired[UsageInfoTypedDict] - - -class CompletionChunk(BaseModel): - id: str - - model: str - - choices: List[CompletionResponseStreamChoice] - - object: Optional[str] = None - - created: Optional[int] = None - - usage: Optional[UsageInfo] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py deleted file mode 100644 index 5a2039c2..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai_azure.types import BaseModel -from typing_extensions import TypedDict - - -class CompletionEventTypedDict(TypedDict): - data: CompletionChunkTypedDict - - -class CompletionEvent(BaseModel): - data: CompletionChunk diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py deleted file mode 100644 index 37294d9b..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from mistralai_azure.utils import validate_open_enum -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing import Literal, Union -from typing_extensions import Annotated, TypedDict - - -FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] - - -class CompletionResponseStreamChoiceTypedDict(TypedDict): - index: int - delta: DeltaMessageTypedDict - finish_reason: Nullable[FinishReason] - - -class CompletionResponseStreamChoice(BaseModel): - index: int - - delta: DeltaMessage - - finish_reason: Annotated[ - Nullable[FinishReason], PlainValidator(validate_open_enum(False)) - ] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - 
serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py deleted file mode 100644 index e6a3e24a..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_azure.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", - Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], -) - - -ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py deleted file mode 100644 index 112eb127..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ /dev/null @@ -1,67 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) - - -class DeltaMessageTypedDict(TypedDict): - role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - - -class DeltaMessage(BaseModel): - role: OptionalNullable[str] = UNSET - - content: OptionalNullable[Content] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py deleted file mode 100644 index a4642f92..00000000 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class FunctionTypedDict(TypedDict): - name: str - parameters: Dict[str, Any] - description: NotRequired[str] - strict: NotRequired[bool] - - -class Function(BaseModel): - name: str - - parameters: Dict[str, Any] - - description: Optional[str] = None - - strict: Optional[bool] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py deleted file mode 100644 index dd93c462..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) - - -Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) - - -class FunctionCallTypedDict(TypedDict): - name: str - arguments: ArgumentsTypedDict - - -class FunctionCall(BaseModel): - name: str - - arguments: Arguments diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py deleted file mode 100644 index b55c82af..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing_extensions import TypedDict - - -class FunctionNameTypedDict(TypedDict): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str - - -class FunctionName(BaseModel): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py deleted file mode 100644 index 1d22d97a..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .validationerror import ValidationError -from mistralai_azure import utils -from mistralai_azure.types import BaseModel -from typing import List, Optional - - -class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None - - -class HTTPValidationError(Exception): - data: HTTPValidationErrorData - - def __init__(self, data: HTTPValidationErrorData): - self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py deleted file mode 100644 index 8faa272b..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class ImageURLTypedDict(TypedDict): - url: str - detail: NotRequired[Nullable[str]] - - -class ImageURL(BaseModel): - url: str - - detail: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k 
in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py deleted file mode 100644 index 734d7f79..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai_azure.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url"] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py deleted file mode 100644 index b2d07d3a..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py deleted file mode 100644 index 888337d3..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py deleted file mode 100644 index 4df3bfbc..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference"] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py deleted file mode 100644 index cfd58dcf..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .responseformats import ResponseFormats -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ResponseFormatTypedDict(TypedDict): - type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] - - -class ResponseFormat(BaseModel): - type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py deleted file mode 100644 index 08c39951..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py deleted file mode 100644 index 03216cbf..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from dataclasses import dataclass -from typing import Optional -import httpx - - -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" - - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None - - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" - - return f"{self.message}: Status {self.status_code}{body}" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/mistralai_azure/src/mistralai_azure/models/security.py deleted file mode 100644 index c1ae8313..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import FieldMetadata, SecurityMetadata -from typing_extensions import Annotated, TypedDict - - -class SecurityTypedDict(TypedDict): - api_key: str - - -class Security(BaseModel): - api_key: Annotated[ - str, - FieldMetadata( - security=SecurityMetadata( - scheme=True, - scheme_type="http", - sub_type="bearer", - field_name="Authorization", - ) - ), - ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py deleted file mode 100644 index b7d975b6..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] -) - - -SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] -) - - -Role = Literal["system"] - - -class SystemMessageTypedDict(TypedDict): - content: SystemMessageContentTypedDict - role: NotRequired[Role] - - -class SystemMessage(BaseModel): - content: SystemMessageContent - - role: Optional[Role] = "system" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py deleted file mode 100644 index be60c8f9..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["text"] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[Type] - - -class TextChunk(BaseModel): - text: str - - type: Optional[Type] = "text" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py deleted file mode 100644 index ffd9b062..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[ToolTypes] - - -class Tool(BaseModel): - function: Function - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py deleted file mode 100644 index 6ccdcaa2..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functioncall import FunctionCall, FunctionCallTypedDict -from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolCallTypedDict(TypedDict): - function: FunctionCallTypedDict - id: NotRequired[str] - type: NotRequired[ToolTypes] - index: NotRequired[int] - - -class ToolCall(BaseModel): - function: FunctionCall - - id: Optional[str] = "null" - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) - - index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py deleted file mode 100644 index cc3c2c1f..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functionname import FunctionName, FunctionNameTypedDict -from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolChoiceTypedDict(TypedDict): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionNameTypedDict - r"""this restriction of `Function` is used to select a specific function to call""" - type: NotRequired[ToolTypes] - - -class ToolChoice(BaseModel): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionName - r"""this restriction of `Function` is used to select a specific function to call""" - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py deleted file mode 100644 index 8e6a6ad8..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ToolChoiceEnum = Literal["auto", "none", "any", "required"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py deleted file mode 100644 index 3e9aa3da..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolMessageContentTypedDict = TypeAliasType( - "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) - - -ToolMessageRole = Literal["tool"] - - -class ToolMessageTypedDict(TypedDict): - content: Nullable[ToolMessageContentTypedDict] - tool_call_id: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] - - -class ToolMessage(BaseModel): - content: Nullable[ToolMessageContent] - - tool_call_id: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - role: Optional[ToolMessageRole] = "tool" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py deleted file mode 100644 index 
dfcd31f0..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import UnrecognizedStr -from typing import Literal, Union - - -ToolTypes = Union[Literal["function"], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py deleted file mode 100644 index b1d094fc..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing_extensions import TypedDict - - -class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class UsageInfo(BaseModel): - prompt_tokens: int - - completion_tokens: int - - total_tokens: int diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py deleted file mode 100644 index 8cce1745..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -UserMessageContentTypedDict = TypeAliasType( - "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) - - -UserMessageRole = Literal["user"] - - -class UserMessageTypedDict(TypedDict): - content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] - - -class UserMessage(BaseModel): - content: Nullable[UserMessageContent] - - role: Optional[UserMessageRole] = "user" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role"] - nullable_fields = ["content"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py deleted file mode 100644 index 4caff4a6..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import List, Union -from typing_extensions import TypeAliasType, TypedDict - - -LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) - - -Loc = TypeAliasType("Loc", Union[str, int]) - - -class ValidationErrorTypedDict(TypedDict): - loc: List[LocTypedDict] - msg: str - type: str - - -class ValidationError(BaseModel): - loc: List[Loc] - - msg: str - - type: str diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py deleted file mode 100644 index 8379e55f..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import weakref -from typing import Any, Callable, Dict, Optional, Union, cast - -import httpx - -from mistralai_azure import models, utils -from mistralai_azure._hooks import SDKHooks -from mistralai_azure.chat import Chat -from mistralai_azure.types import UNSET, OptionalNullable - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig - - -class MistralAzure(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - chat: Chat - r"""Chat Completion API.""" - - def __init__( - self, - azure_api_key: Union[str, Callable[[], str]], - azure_endpoint: str, - url_params: Optional[Dict[str, str]] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. - - :param azure_api_key: The azure_api_key required for authentication - :param azure_endpoint: The Azure AI endpoint URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - - # if azure_endpoint doesn't end with `/v1` add it - if not azure_endpoint.endswith("/"): - azure_endpoint += "/" - if not azure_endpoint.endswith("v1/"): - azure_endpoint += "v1/" - server_url = azure_endpoint - - client_supplied = True - if client is None: - client = httpx.Client() - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." 
- - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol." - - security: Any = None - if callable(azure_api_key): - security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment - else: - security = models.Security(api_key=azure_api_key) - - if server_url is not None: - if url_params is not None: - server_url = utils.template_url(server_url, url_params) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=server_url, - server=None, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - ) - - hooks = SDKHooks() - - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - self._init_sdks() - - def _init_sdks(self): - self.chat = Chat(self.sdk_configuration) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - 
self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py deleted file mode 100644 index 605e5d74..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from ._hooks import SDKHooks -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) -from .httpclient import AsyncHttpClient, HttpClient -from .utils import Logger, RetryConfig, remove_suffix -from dataclasses import dataclass -from mistralai_azure import models -from mistralai_azure.types import OptionalNullable, UNSET -from pydantic import Field -from typing import Callable, Dict, Optional, Tuple, Union - - -SERVER_EU = "eu" -r"""EU Production server""" -SERVERS = { - SERVER_EU: "https://api.mistral.ai", -} -"""Contains the list of servers available to the SDK""" - - -@dataclass -class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool - debug_logger: Logger - security: Optional[Union[models.Security, Callable[[], models.Security]]] = None - server_url: Optional[str] = "" - server: Optional[str] = "" - language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ - retry_config: 
OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) - timeout_ms: Optional[int] = None - - def __post_init__(self): - self._hooks = SDKHooks() - - def get_server_details(self) -> Tuple[str, Dict[str, str]]: - if self.server_url is not None and self.server_url: - return remove_suffix(self.server_url, "/"), {} - if not self.server: - self.server = SERVER_EU - - if self.server not in SERVERS: - raise ValueError(f'Invalid server "{self.server}"') - - return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py deleted file mode 100644 index a6187efa..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from pydantic import ConfigDict, model_serializer -from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType -from typing_extensions import TypeAliasType, TypeAlias - - -class BaseModel(PydanticBaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() - ) - - -class Unset(BaseModel): - @model_serializer(mode="plain") - def serialize_model(self): - return UNSET_SENTINEL - - def __bool__(self) -> Literal[False]: - return False - - -UNSET = Unset() -UNSET_SENTINEL = "~?~unset~?~sentinel~?~" - - -T = TypeVar("T") -if TYPE_CHECKING: - Nullable: TypeAlias = Union[T, None] - OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] -else: - Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) - OptionalNullable = TypeAliasType( - "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) - ) - -UnrecognizedInt = NewType("UnrecognizedInt", int) 
-UnrecognizedStr = NewType("UnrecognizedStr", str) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py deleted file mode 100644 index 3cded8fe..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger - -__all__ = [ - "BackoffStrategy", - "FieldMetadata", - "find_metadata", - "FormMetadata", - "generate_url", - "get_body_content", - "get_default_logger", - "get_discriminator", - "get_global_from_env", - "get_headers", - "get_pydantic_model", - "get_query_params", - "get_response_headers", - "get_security", - "HeaderMetadata", - "Logger", - "marshal_json", - 
"match_content_type", - "match_status_codes", - "match_response", - "MultipartFormMetadata", - "OpenEnumMeta", - "PathParamMetadata", - "QueryParamMetadata", - "remove_suffix", - "Retries", - "retry", - "retry_async", - "RetryConfig", - "RequestMetadata", - "SecurityMetadata", - "serialize_decimal", - "serialize_float", - "serialize_int", - "serialize_request_body", - "SerializedRequestBody", - "stream_to_text", - "stream_to_text_async", - "stream_to_bytes", - "stream_to_bytes_async", - "template_url", - "unmarshal", - "unmarshal_json", - "validate_decimal", - "validate_const", - "validate_float", - "validate_int", - "validate_open_enum", - "cast_partial", -] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py deleted file mode 100644 index 387874ed..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from enum import Enum -from typing import Any, Optional - -def get_discriminator(model: Any, fieldname: str, key: str) -> str: - """ - Recursively search for the discriminator attribute in a model. - - Args: - model (Any): The model to search within. - fieldname (str): The name of the field to search for. - key (str): The key to search for in dictionaries. - - Returns: - str: The name of the discriminator attribute. - - Raises: - ValueError: If the discriminator attribute is not found. 
- """ - upper_fieldname = fieldname.upper() - - def get_field_discriminator(field: Any) -> Optional[str]: - """Search for the discriminator attribute in a given field.""" - - if isinstance(field, dict): - if key in field: - return f'{field[key]}' - - if hasattr(field, fieldname): - attr = getattr(field, fieldname) - if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' - - if hasattr(field, upper_fieldname): - attr = getattr(field, upper_fieldname) - if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' - - return None - - - if isinstance(model, list): - for field in model: - discriminator = get_field_discriminator(field) - if discriminator is not None: - return discriminator - - discriminator = get_field_discriminator(model) - if discriminator is not None: - return discriminator - - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py deleted file mode 100644 index c650b10c..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import enum - - -class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. 
- # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py deleted file mode 100644 index 74a63f75..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py +++ /dev/null @@ -1,238 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import re -import json -from typing import ( - Callable, - Generic, - TypeVar, - Optional, - Generator, - AsyncGenerator, - Tuple, -) -import httpx - -T = TypeVar("T") - - -class EventStream(Generic[T]): - response: httpx.Response - generator: Generator[T, None, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - ): - self.response = response - self.generator = stream_events(response, decoder, sentinel) - - def __iter__(self): - return self - - def __next__(self): - return next(self.generator) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.response.close() - - -class EventStreamAsync(Generic[T]): - response: httpx.Response - generator: AsyncGenerator[T, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - ): - self.response = response - self.generator = stream_events_async(response, decoder, sentinel) - - def __aiter__(self): - return self - - async def __anext__(self): - return await self.generator.__anext__() - - async def __aenter__(self): - return self - - async 
def __aexit__(self, exc_type, exc_val, exc_tb): - await self.response.aclose() - - -class ServerEvent: - id: Optional[str] = None - event: Optional[str] = None - data: Optional[str] = None - retry: Optional[int] = None - - -MESSAGE_BOUNDARIES = [ - b"\r\n\r\n", - b"\n\n", - b"\r\r", -] - - -async def stream_events_async( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> AsyncGenerator[T, None]: - buffer = bytearray() - position = 0 - discard = False - async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def stream_events( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> Generator[T, None, None]: - buffer = bytearray() - position = 0 - discard = False - for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: - block = raw.decode() - lines = re.split(r"\r?\n|\r", block) - publish = False - event = ServerEvent() - data = "" - for line in lines: - if not line: - continue - - delim = line.find(":") - if delim <= 0: - continue - - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] - - if field == "event": - event.event = value - publish = True - elif field == "data": - data += value + "\n" - publish = True - elif field == "id": - event.id = value - publish = True - elif field == "retry": - event.retry = int(value) if value.isdigit() else None - publish = True - - if sentinel and data == f"{sentinel}\n": - return None, True - - if data: - data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass - - out = None - if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False - - -def _peek_sequence(position: int, buffer: bytearray, sequence: 
bytes): - if len(sequence) > (len(buffer) - position): - return None - - for i, seq in enumerate(sequence): - if buffer[position + i] != seq: - return None - - return sequence diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py deleted file mode 100644 index 0472aba8..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ /dev/null @@ -1,202 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .serializers import marshal_json - -from .metadata import ( - FormMetadata, - MultipartFormMetadata, - find_field_metadata, -) -from .values import _is_set, _val_to_string - - -def _populate_form( - field_name: str, - explode: bool, - obj: Any, - delimiter: str, - form: Dict[str, List[str]], -): - if not _is_set(obj): - return form - - if isinstance(obj, BaseModel): - items = [] - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_field_name = obj_field.alias if obj_field.alias is not None else name - if obj_field_name == "": - continue - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - form[obj_field_name] = [_val_to_string(val)] - else: - items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, Dict): - items = [] - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - form[key] = [_val_to_string(value)] - else: - items.append(f"{key}{delimiter}{_val_to_string(value)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, List): - items = [] - - for value in obj: - if not 
_is_set(value): - continue - - if explode: - if not field_name in form: - form[field_name] = [] - form[field_name].append(_val_to_string(value)) - else: - items.append(_val_to_string(value)) - - if len(items) > 0: - form[field_name] = [delimiter.join([str(item) for item in items])] - else: - form[field_name] = [_val_to_string(obj)] - - return form - - -def serialize_multipart_form( - media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: - form: Dict[str, Any] = {} - files: Dict[str, Any] = {} - - if not isinstance(request, BaseModel): - raise TypeError("invalid request body type") - - request_fields: Dict[str, FieldInfo] = request.__class__.model_fields - request_field_types = get_type_hints(request.__class__) - - for name in request_fields: - field = request_fields[name] - - val = getattr(request, name) - if not _is_set(val): - continue - - field_metadata = find_field_metadata(field, MultipartFormMetadata) - if not field_metadata: - continue - - f_name = field.alias if field.alias else name - - if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] - - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue - - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) - else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) - elif field_metadata.json: - files[f_name] = ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ) - else: - if isinstance(val, 
List): - values = [] - - for value in val: - if not _is_set(value): - continue - values.append(_val_to_string(value)) - - form[f_name + "[]"] = values - else: - form[f_name] = _val_to_string(val) - return media_type, form, files - - -def serialize_form_data(data: Any) -> Dict[str, Any]: - form: Dict[str, List[str]] = {} - - if isinstance(data, BaseModel): - data_fields: Dict[str, FieldInfo] = data.__class__.model_fields - data_field_types = get_type_hints(data.__class__) - for name in data_fields: - field = data_fields[name] - - val = getattr(data, name) - if not _is_set(val): - continue - - metadata = find_field_metadata(field, FormMetadata) - if metadata is None: - continue - - f_name = field.alias if field.alias is not None else name - - if metadata.json: - form[f_name] = [marshal_json(val, data_field_types[name])] - else: - if metadata.style == "form": - _populate_form( - f_name, - metadata.explode, - val, - ",", - form, - ) - else: - raise ValueError(f"Invalid form style for field {name}") - elif isinstance(data, Dict): - for key, value in data.items(): - if _is_set(value): - form[key] = [_val_to_string(value)] - else: - raise TypeError(f"Invalid request body type {type(data)} for form data") - - return form diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py deleted file mode 100644 index 37a6e7f9..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py +++ /dev/null @@ -1,205 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, -) - -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - QueryParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) -from .forms import _populate_form - - -def get_query_params( - query_params: Any, - gbls: Optional[Any] = None, -) -> Dict[str, List[str]]: - params: Dict[str, List[str]] = {} - - globals_already_populated = _populate_query_params(query_params, gbls, params, []) - if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) - - return params - - -def _populate_query_params( - query_params: Any, - gbls: Any, - query_param_values: Dict[str, List[str]], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(query_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields - param_field_types = get_type_hints(query_params.__class__) - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - - metadata = find_field_metadata(field, QueryParamMetadata) - if not metadata: - continue - - value = getattr(query_params, name) if _is_set(query_params) else None - - value, global_found = _populate_from_globals( - name, value, QueryParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - f_name = field.alias if field.alias is not None else name - serialization = metadata.serialization - if serialization is not None: - serialized_parms = _get_serialized_params( - metadata, f_name, value, param_field_types[name] - ) - for key, value in serialized_parms.items(): - if key in query_param_values: - query_param_values[key].extend(value) - else: - query_param_values[key] = [value] - else: - style = metadata.style - if 
style == "deepObject": - _populate_deep_object_query_params(f_name, value, query_param_values) - elif style == "form": - _populate_delimited_query_params( - metadata, f_name, value, ",", query_param_values - ) - elif style == "pipeDelimited": - _populate_delimited_query_params( - metadata, f_name, value, "|", query_param_values - ) - else: - raise NotImplementedError( - f"query param style {style} not yet supported" - ) - - return globals_already_populated - - -def _populate_deep_object_query_params( - field_name: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj): - return - - if isinstance(obj, BaseModel): - _populate_deep_object_query_params_basemodel(field_name, obj, params) - elif isinstance(obj, Dict): - _populate_deep_object_query_params_dict(field_name, obj, params) - - -def _populate_deep_object_query_params_basemodel( - prior_params_key: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj) or not isinstance(obj, BaseModel): - return - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - - f_name = obj_field.alias if obj_field.alias is not None else name - - params_key = f"{prior_params_key}[{f_name}]" - - obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if not _is_set(obj_param_metadata): - continue - - obj_val = getattr(obj, name) - if not _is_set(obj_val): - continue - - if isinstance(obj_val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, obj_val, params) - elif isinstance(obj_val, Dict): - _populate_deep_object_query_params_dict(params_key, obj_val, params) - elif isinstance(obj_val, List): - _populate_deep_object_query_params_list(params_key, obj_val, params) - else: - params[params_key] = [_val_to_string(obj_val)] - - -def _populate_deep_object_query_params_dict( - prior_params_key: str, - value: Dict, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for key, val 
in value.items(): - if not _is_set(val): - continue - - params_key = f"{prior_params_key}[{key}]" - - if isinstance(val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, val, params) - elif isinstance(val, Dict): - _populate_deep_object_query_params_dict(params_key, val, params) - elif isinstance(val, List): - _populate_deep_object_query_params_list(params_key, val, params) - else: - params[params_key] = [_val_to_string(val)] - - -def _populate_deep_object_query_params_list( - params_key: str, - value: List, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for val in value: - if not _is_set(val): - continue - - if params.get(params_key) is None: - params[params_key] = [] - - params[params_key].append(_val_to_string(val)) - - -def _populate_delimited_query_params( - metadata: QueryParamMetadata, - field_name: str, - obj: Any, - delimiter: str, - query_param_values: Dict[str, List[str]], -): - _populate_form( - field_name, - metadata.explode, - obj, - delimiter, - query_param_values, - ) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py deleted file mode 100644 index d5240dd5..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import io -from dataclasses import dataclass -import re -from typing import ( - Any, - Optional, -) - -from .forms import serialize_form_data, serialize_multipart_form - -from .serializers import marshal_json - -SERIALIZATION_METHOD_TO_CONTENT_TYPE = { - "json": "application/json", - "form": "application/x-www-form-urlencoded", - "multipart": "multipart/form-data", - "raw": "application/octet-stream", - "string": "text/plain", -} - - -@dataclass -class SerializedRequestBody: - media_type: Optional[str] = None - content: Optional[Any] = None - data: Optional[Any] = None - files: Optional[Any] = None - - -def serialize_request_body( - request_body: Any, - nullable: bool, - optional: bool, - serialization_method: str, - request_body_type, -) -> Optional[SerializedRequestBody]: - if request_body is None: - if not nullable and optional: - return None - - media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] - - serialized_request_body = SerializedRequestBody(media_type) - - if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: - serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"multipart\/.*", media_type) is not None: - ( - serialized_request_body.media_type, - serialized_request_body.data, - serialized_request_body.files, - ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: - serialized_request_body.data = serialize_form_data(request_body) - elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): - serialized_request_body.content = request_body - elif isinstance(request_body, str): - serialized_request_body.content = request_body - else: - raise TypeError( - f"invalid request body type {type(request_body)} for mediaType {media_type}" - ) - - return serialized_request_body diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py 
b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py deleted file mode 100644 index 4d608671..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py +++ /dev/null @@ -1,217 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import asyncio -import random -import time -from typing import List - -import httpx - - -class BackoffStrategy: - initial_interval: int - max_interval: int - exponent: float - max_elapsed_time: int - - def __init__( - self, - initial_interval: int, - max_interval: int, - exponent: float, - max_elapsed_time: int, - ): - self.initial_interval = initial_interval - self.max_interval = max_interval - self.exponent = exponent - self.max_elapsed_time = max_elapsed_time - - -class RetryConfig: - strategy: str - backoff: BackoffStrategy - retry_connection_errors: bool - - def __init__( - self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool - ): - self.strategy = strategy - self.backoff = backoff - self.retry_connection_errors = retry_connection_errors - - -class Retries: - config: RetryConfig - status_codes: List[str] - - def __init__(self, config: RetryConfig, status_codes: List[str]): - self.config = config - self.status_codes = status_codes - - -class TemporaryError(Exception): - response: httpx.Response - - def __init__(self, response: httpx.Response): - self.response = response - - -class PermanentError(Exception): - inner: Exception - - def __init__(self, inner: Exception): - self.inner = inner - - -def retry(func, retries: Retries): - if retries.config.strategy == "backoff": - - def do_request() -> httpx.Response: - res: httpx.Response - try: - res = func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if 
res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return retry_with_backoff( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return func() - - -async def retry_async(func, retries: Retries): - if retries.config.strategy == "backoff": - - async def do_request() -> httpx.Response: - res: httpx.Response - try: - res = await func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return await retry_with_backoff_async( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return await func() - - -def retry_with_backoff( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - 
max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) - retries += 1 - - -async def retry_with_backoff_async( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return await func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) - await asyncio.sleep(sleep) - retries += 1 diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/mistralai_azure/src/mistralai_azure/utils/security.py deleted file mode 100644 index 295a3f40..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/security.py +++ /dev/null @@ -1,174 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import base64 -from typing import ( - Any, - Dict, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - SecurityMetadata, - find_field_metadata, -) - - -def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: - headers: Dict[str, str] = {} - query_params: Dict[str, List[str]] = {} - - if security is None: - return headers, query_params - - if not isinstance(security, BaseModel): - raise TypeError("security must be a pydantic model") - - sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields - for name in sec_fields: - sec_field = sec_fields[name] - - value = getattr(security, name) - if value is None: - continue - - metadata = find_field_metadata(sec_field, SecurityMetadata) - if metadata is None: - continue - if metadata.option: - _parse_security_option(headers, query_params, value) - return headers, query_params - if metadata.scheme: - # Special case for basic auth or custom auth which could be a flattened model - if metadata.sub_type in ["basic", "custom"] and not isinstance( - value, BaseModel - ): - _parse_security_scheme(headers, query_params, metadata, name, security) - else: - _parse_security_scheme(headers, query_params, metadata, name, value) - - return headers, query_params - - -def _parse_security_option( - headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any -): - if not isinstance(option, BaseModel): - raise TypeError("security option must be a pydantic model") - - opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields - for name in opt_fields: - opt_field = opt_fields[name] - - metadata = find_field_metadata(opt_field, SecurityMetadata) - if metadata is None or not metadata.scheme: - continue - _parse_security_scheme( - headers, query_params, metadata, name, getattr(option, name) - ) - - -def _parse_security_scheme( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: 
SecurityMetadata, - field_name: str, - scheme: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - if isinstance(scheme, BaseModel): - if scheme_type == "http": - if sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return - if sub_type == "custom": - return - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - value = getattr(scheme, name) - - _parse_security_scheme_value( - headers, query_params, scheme_metadata, metadata, name, value - ) - else: - _parse_security_scheme_value( - headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme - ) - - -def _parse_security_scheme_value( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - security_metadata: SecurityMetadata, - field_name: str, - value: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - header_name = security_metadata.get_field_name(field_name) - - if scheme_type == "apiKey": - if sub_type == "header": - headers[header_name] = value - elif sub_type == "query": - query_params[header_name] = [value] - else: - raise ValueError("sub type {sub_type} not supported") - elif scheme_type == "openIdConnect": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "oauth2": - if sub_type != "client_credentials": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "http": - if sub_type == "bearer": - headers[header_name] = _apply_bearer(value) - elif sub_type == "custom": - return - else: - raise ValueError("sub type {sub_type} not supported") - else: - raise ValueError("scheme type {scheme_type} not supported") - - -def _apply_bearer(token: str) -> str: - return token.lower().startswith("bearer ") and 
token or f"Bearer {token}" - - -def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): - username = "" - password = "" - - if not isinstance(scheme, BaseModel): - raise TypeError("basic auth scheme must be a pydantic model") - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - field_name = metadata.field_name - value = getattr(scheme, name) - - if field_name == "username": - username = value - if field_name == "password": - password = value - - data = f"{username}:{password}".encode() - headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py deleted file mode 100644 index baa41fbd..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ /dev/null @@ -1,219 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from decimal import Decimal -import json -from typing import Any, Dict, List, Union, get_args -import httpx -from typing_extensions import get_origin -from pydantic import ConfigDict, create_model -from pydantic_core import from_json -from typing_inspection.typing_objects import is_union - -from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset - - -def serialize_decimal(as_str: bool): - def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: - return None - if isinstance(d, Unset): - return d - - if not isinstance(d, Decimal): - raise ValueError("Expected Decimal object") - - return str(d) if as_str else float(d) - - return serialize - - -def validate_decimal(d): - if d is None: - return None - - if isinstance(d, (Decimal, Unset)): - return d - - if not isinstance(d, (str, int, float)): - raise ValueError("Expected string, int or float") - - return Decimal(str(d)) - - -def serialize_float(as_str: bool): - def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: - return None - if isinstance(f, Unset): - return f - - if not isinstance(f, float): - raise ValueError("Expected float") - - return str(f) if as_str else f - - return serialize - - -def validate_float(f): - if f is None: - return None - - if isinstance(f, (float, Unset)): - return f - - if not isinstance(f, str): - raise ValueError("Expected string") - - return float(f) - - -def serialize_int(as_str: bool): - def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: - return None - if isinstance(i, Unset): - return i - - if not isinstance(i, int): - raise ValueError("Expected int") - - return str(i) if as_str else i - - return serialize - - -def validate_int(b): - if b is None: - return None - - if isinstance(b, (int, Unset)): - return b - - if not 
isinstance(b, str): - raise ValueError("Expected string") - - return int(b) - - -def validate_open_enum(is_int: bool): - def validate(e): - if e is None: - return None - - if isinstance(e, Unset): - return e - - if is_int: - if not isinstance(e, int): - raise ValueError("Expected int") - else: - if not isinstance(e, str): - raise ValueError("Expected string") - - return e - - return validate - - -def validate_const(v): - def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: - return None - - if v != c: - raise ValueError(f"Expected {v}") - - return c - - return validate - - -def unmarshal_json(raw, typ: Any) -> Any: - return unmarshal(from_json(raw), typ) - - -def unmarshal(val, typ: Any) -> Any: - unmarshaller = create_model( - "Unmarshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = unmarshaller(body=val) - - # pyright: ignore[reportAttributeAccessIssue] - return m.body # type: ignore - - -def marshal_json(val, typ): - if is_nullable(typ) and val is None: - return "null" - - marshaller = create_model( - "Marshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = marshaller(body=val) - - d = m.model_dump(by_alias=True, mode="json", exclude_none=True) - - if len(d) == 0: - return "" - - return json.dumps(d[next(iter(d))], separators=(",", ":")) - - -def is_nullable(field): - origin = get_origin(field) - if origin is Nullable or origin is OptionalNullable: - return True - - if not origin is Union or type(None) not in get_args(field): - return False - - for arg in get_args(field): - if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: - return True - - return False - - -def stream_to_text(stream: httpx.Response) -> str: - return "".join(stream.iter_text()) - - -async def stream_to_text_async(stream: httpx.Response) -> str: - return "".join([chunk 
async for chunk in stream.aiter_text()]) - - -def stream_to_bytes(stream: httpx.Response) -> bytes: - return stream.content - - -async def stream_to_bytes_async(stream: httpx.Response) -> bytes: - return await stream.aread() - - -def get_pydantic_model(data: Any, typ: Any) -> Any: - if not _contains_pydantic_model(data): - return unmarshal(data, typ) - - return data - - -def _contains_pydantic_model(data: Any) -> bool: - if isinstance(data, BaseModel): - return True - if isinstance(data, List): - return any(_contains_pydantic_model(item) for item in data) - if isinstance(data, Dict): - return any(_contains_pydantic_model(value) for value in data.values()) - - return False diff --git a/packages/mistralai_gcp/.genignore b/packages/mistralai_gcp/.genignore deleted file mode 100644 index ea10bc8e..00000000 --- a/packages/mistralai_gcp/.genignore +++ /dev/null @@ -1,4 +0,0 @@ -src/mistralai_gcp/sdk.py -README.md -USAGE.md -docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore deleted file mode 100644 index 5a82b069..00000000 --- a/packages/mistralai_gcp/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -.speakeasy/reports -README-PYPI.md -.venv/ -venv/ -src/*.egg-info/ -__pycache__/ -.pytest_cache/ -.python-version -.DS_Store -pyrightconfig.json diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock deleted file mode 100644 index 5e157235..00000000 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ /dev/null @@ -1,211 +0,0 @@ -lockVersion: 2.0.0 -id: ec60f2d8-7869-45c1-918e-773d41a8cf74 -management: - docChecksum: 28fe1ab59b4dee005217f2dbbd836060 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 66bf5911f59189922e03a75a72923b32 - published: true -features: - python: - additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.5 - core: 5.12.3 - defaultEnabledRetries: 0.2.0 - enumUnions: 
0.1.0 - envVarSecurityUsage: 0.3.2 - examples: 3.0.1 - flatRequests: 1.0.1 - globalSecurity: 3.0.3 - globalSecurityCallbacks: 1.0.0 - globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 - methodArguments: 1.0.2 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.0 - responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.0.1 - serverEvents: 1.0.7 - serverEventsSentinels: 0.1.0 - serverIDs: 3.0.0 - unions: 3.0.4 -generatedFiles: - - .gitattributes - - .python-version - - .vscode/settings.json - - docs/models/arguments.md - - docs/models/assistantmessage.md - - docs/models/assistantmessagecontent.md - - docs/models/assistantmessagerole.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionchoicefinishreason.md - - docs/models/chatcompletionrequest.md - - docs/models/chatcompletionrequestmessages.md - - docs/models/chatcompletionrequeststop.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/completionchunk.md - - docs/models/completionevent.md - - docs/models/completionresponsestreamchoice.md - - docs/models/content.md - - docs/models/contentchunk.md - - docs/models/deltamessage.md - - docs/models/fimcompletionrequest.md - - docs/models/fimcompletionrequeststop.md - - docs/models/fimcompletionresponse.md - - docs/models/fimcompletionstreamrequest.md - - docs/models/fimcompletionstreamrequeststop.md - - docs/models/finishreason.md - - docs/models/function.md - - docs/models/functioncall.md - - docs/models/functionname.md - - docs/models/httpvalidationerror.md - - docs/models/imageurl.md - - docs/models/imageurlchunk.md - - docs/models/imageurlchunkimageurl.md - - docs/models/imageurlchunktype.md - - docs/models/jsonschema.md - - docs/models/loc.md - - docs/models/messages.md - - docs/models/prediction.md - - docs/models/referencechunk.md - - docs/models/referencechunktype.md - - 
docs/models/responseformat.md - - docs/models/responseformats.md - - docs/models/role.md - - docs/models/security.md - - docs/models/stop.md - - docs/models/systemmessage.md - - docs/models/systemmessagecontent.md - - docs/models/textchunk.md - - docs/models/tool.md - - docs/models/toolcall.md - - docs/models/toolchoice.md - - docs/models/toolchoiceenum.md - - docs/models/toolmessage.md - - docs/models/toolmessagecontent.md - - docs/models/toolmessagerole.md - - docs/models/tooltypes.md - - docs/models/type.md - - docs/models/usageinfo.md - - docs/models/usermessage.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/utils/retryconfig.md - - docs/models/validationerror.md - - poetry.toml - - py.typed - - pylintrc - - pyproject.toml - - scripts/prepare_readme.py - - scripts/publish.sh - - src/mistralai_gcp/__init__.py - - src/mistralai_gcp/_hooks/__init__.py - - src/mistralai_gcp/_hooks/sdkhooks.py - - src/mistralai_gcp/_hooks/types.py - - src/mistralai_gcp/_version.py - - src/mistralai_gcp/basesdk.py - - src/mistralai_gcp/chat.py - - src/mistralai_gcp/fim.py - - src/mistralai_gcp/httpclient.py - - src/mistralai_gcp/models/__init__.py - - src/mistralai_gcp/models/assistantmessage.py - - src/mistralai_gcp/models/chatcompletionchoice.py - - src/mistralai_gcp/models/chatcompletionrequest.py - - src/mistralai_gcp/models/chatcompletionresponse.py - - src/mistralai_gcp/models/chatcompletionstreamrequest.py - - src/mistralai_gcp/models/completionchunk.py - - src/mistralai_gcp/models/completionevent.py - - src/mistralai_gcp/models/completionresponsestreamchoice.py - - src/mistralai_gcp/models/contentchunk.py - - src/mistralai_gcp/models/deltamessage.py - - src/mistralai_gcp/models/fimcompletionrequest.py - - src/mistralai_gcp/models/fimcompletionresponse.py - - src/mistralai_gcp/models/fimcompletionstreamrequest.py - - src/mistralai_gcp/models/function.py - - src/mistralai_gcp/models/functioncall.py - - 
src/mistralai_gcp/models/functionname.py - - src/mistralai_gcp/models/httpvalidationerror.py - - src/mistralai_gcp/models/imageurl.py - - src/mistralai_gcp/models/imageurlchunk.py - - src/mistralai_gcp/models/jsonschema.py - - src/mistralai_gcp/models/prediction.py - - src/mistralai_gcp/models/referencechunk.py - - src/mistralai_gcp/models/responseformat.py - - src/mistralai_gcp/models/responseformats.py - - src/mistralai_gcp/models/sdkerror.py - - src/mistralai_gcp/models/security.py - - src/mistralai_gcp/models/systemmessage.py - - src/mistralai_gcp/models/textchunk.py - - src/mistralai_gcp/models/tool.py - - src/mistralai_gcp/models/toolcall.py - - src/mistralai_gcp/models/toolchoice.py - - src/mistralai_gcp/models/toolchoiceenum.py - - src/mistralai_gcp/models/toolmessage.py - - src/mistralai_gcp/models/tooltypes.py - - src/mistralai_gcp/models/usageinfo.py - - src/mistralai_gcp/models/usermessage.py - - src/mistralai_gcp/models/validationerror.py - - src/mistralai_gcp/py.typed - - src/mistralai_gcp/sdk.py - - src/mistralai_gcp/sdkconfiguration.py - - src/mistralai_gcp/types/__init__.py - - src/mistralai_gcp/types/basemodel.py - - src/mistralai_gcp/utils/__init__.py - - src/mistralai_gcp/utils/annotations.py - - src/mistralai_gcp/utils/enums.py - - src/mistralai_gcp/utils/eventstreaming.py - - src/mistralai_gcp/utils/forms.py - - src/mistralai_gcp/utils/headers.py - - src/mistralai_gcp/utils/logger.py - - src/mistralai_gcp/utils/metadata.py - - src/mistralai_gcp/utils/queryparams.py - - src/mistralai_gcp/utils/requestbodies.py - - src/mistralai_gcp/utils/retries.py - - src/mistralai_gcp/utils/security.py - - src/mistralai_gcp/utils/serializers.py - - src/mistralai_gcp/utils/url.py - - src/mistralai_gcp/utils/values.py -examples: - stream_chat: - speakeasy-default-stream-chat: - requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} - responses: - "422": - application/json: {} - "200": {} - chat_completion_v1_chat_completions_post: - speakeasy-default-chat-completion-v1-chat-completions-post: - requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} - responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} - "422": - application/json: {} - stream_fim: - speakeasy-default-stream-fim: - requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} - responses: - "422": - application/json: {} - "200": {} - fim_completion_v1_fim_completions_post: - speakeasy-default-fim-completion-v1-fim-completions-post: - requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} - responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} - "422": - 
application/json: {} -examplesVersion: 1.0.0 -generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml deleted file mode 100644 index d7be7fed..00000000 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ /dev/null @@ -1,53 +0,0 @@ -configVersion: 2.0.0 -generation: - sdkClassName: MistralGCP - maintainOpenAPIOrder: true - usageSnippets: - optionalPropertyRendering: withExample - useClassNamesForArrayFields: true - fixes: - nameResolutionDec2023: true - nameResolutionFeb2025: false - parameterOrderingFeb2024: true - requestResponseComponentNamesFeb2024: true - securityFeb2025: false - auth: - oAuth2ClientCredentialsEnabled: true - oAuth2PasswordEnabled: false -python: - version: 1.6.0 - additionalDependencies: - dev: - pytest: ^8.2.2 - pytest-asyncio: ^0.23.7 - main: - google-auth: ^2.31.0 - requests: ^2.32.3 - authors: - - Mistral - clientServerStatusCodesAsErrors: true - defaultErrorName: SDKError - description: Python Client SDK for the Mistral AI API in GCP. 
- enableCustomCodeRegions: false - enumFormat: union - fixFlags: - responseRequiredSep2024: false - flattenGlobalSecurity: true - flattenRequests: true - flatteningOrder: parameters-first - imports: - option: openapi - paths: - callbacks: "" - errors: "" - operations: "" - shared: "" - webhooks: "" - inputModelSuffix: input - maxMethodParams: 15 - methodArguments: infer-optional-args - outputModelSuffix: output - packageName: mistralai-gcp - pytestTimeout: 0 - responseFormat: flat - templateVersion: v2 diff --git a/packages/mistralai_gcp/.vscode/settings.json b/packages/mistralai_gcp/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/packages/mistralai_gcp/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/packages/mistralai_gcp/README.md b/packages/mistralai_gcp/README.md deleted file mode 100644 index a4233244..00000000 --- a/packages/mistralai_gcp/README.md +++ /dev/null @@ -1,425 +0,0 @@ -# Mistral on GCP Python Client - - -**Prerequisites** - -Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). - -To run this locally you will also need to ensure you are authenticated with Google Cloud. You can do this by running - -```bash -gcloud auth application-default login -``` - -## SDK Installation - -Install the extras dependencies specific to Google Cloud: - -```bash -pip install mistralai[gcp] -``` - - -## SDK Example Usage - -### Create Chat Completions - -This example shows how to create chat completions. 
- -```python -# Synchronous Example -from mistralai_gcp import MistralGCP -import os -) - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_gcp import MistralGCP -import os - -async def main(): - s = MistralGCP( - api_key=os.getenv("API_KEY", ""), - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="mistral-small-latest") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - - - -## Available Resources and Operations - -### [chat](docs/sdks/chat/README.md) - -* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -* [create](docs/sdks/chat/README.md#create) - Chat Completion - -### [fim](docs/sdks/fim/README.md) - -* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -* [create](docs/sdks/fim/README.md#create) - Fim Completion - - - -## Server-sent event streaming - -[Server-sent events][mdn-sse] are used to stream content from certain -operations. These operations will expose the stream as [Generator][generator] that -can be consumed using a simple `for` loop. The loop will -terminate when the server no longer has any events to send and closes the -underlying connection. - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events -[generator]: https://wiki.python.org/moin/Generators - - - -## Retries - -Some of the endpoints in this SDK support retries. 
If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. - -To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: -```python -from mistralai_gcp import MistralGCP -from mistralgcp.utils import BackoffStrategy, RetryConfig -import os - -s = MistralGCP() - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest", - retries=RetryConfig( - "backoff", - BackoffStrategy(1, 50, 1.1, 100), - False - ) -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: -```python -from mistralai_gcp import MistralGCP -from mistralgcp.utils import BackoffStrategy, RetryConfig -import os - -s = MistralGCP( - retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), -) - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - - -## Error Handling - -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. 
- -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - -### Example - -```python -from mistralai_gcp import MistralGCP, models -import os - -s = MistralGCP() - -res = None -try: - res = s.chat.complete( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" - ) - -except models.HTTPValidationError as e: - # handle exception - raise(e) -except models.SDKError as e: - # handle exception - raise(e) - -if res is not None: - # handle response - pass - -``` - - - -## Server Selection - -### Select Server by Name - -You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: - -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | - -#### Example - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP(server="prod") - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - -### Override Server URL Per-Client - -The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. 
For example: -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP( - server_url="https://api.mistral.ai", -) - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - - -## Custom HTTP Client - -The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. -Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. -This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
- -For example, you could specify a header for every request that this sdk makes as follows: -```python -from mistralai_gcp import MistralGCP -import httpx - -http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = MistralGCP(client=http_client) -``` - -or you could wrap the client with your own custom logic: -```python -from mistralai_gcp import MistralGCP -from mistralai_gcp.httpclient import AsyncHttpClient -import httpx - -class CustomClient(AsyncHttpClient): - client: AsyncHttpClient - - def __init__(self, client: AsyncHttpClient): - self.client = client - - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - request.headers["Client-Level-Header"] = "added by client" - - return await self.client.send( - request, stream=stream, auth=auth, follow_redirects=follow_redirects - ) - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - return self.client.build_request( - method, - url, - content=content, - data=data, - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - extensions=extensions, - ) - -s = MistralGCP(async_client=CustomClient(httpx.AsyncClient())) -``` - - - -## 
Authentication - -### Per-Client Security Schemes - -This SDK supports the following security scheme globally: - -| Name | Type | Scheme | -| --------- | ---- | ----------- | -| `api_key` | http | HTTP Bearer | - -To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - - - -# Development - -## Contributions - -While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/packages/mistralai_gcp/USAGE.md b/packages/mistralai_gcp/USAGE.md deleted file mode 100644 index 30fa08aa..00000000 --- a/packages/mistralai_gcp/USAGE.md +++ /dev/null @@ -1,51 +0,0 @@ - -### Create Chat Completions - -This example shows how to create chat completions. - -```python -# Synchronous Example -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_gcp import MistralGCP -import os - -async def main(): - s = MistralGCP( - api_key=os.getenv("API_KEY", ""), - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="mistral-small-latest") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md deleted file mode 100644 index 3d0bd90b..00000000 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ /dev/null @@ -1,11 +0,0 @@ -# AssistantMessage - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | 
Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagerole.md b/packages/mistralai_gcp/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/packages/mistralai_gcp/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md deleted file mode 100644 index 9d735d08..00000000 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ /dev/null @@ -1,23 +0,0 @@ -# ChatCompletionRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md b/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md deleted file mode 100644 index bc7708a6..00000000 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md deleted file mode 100644 index 1646528d..00000000 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md +++ /dev/null @@ -1,17 +0,0 @@ -# ChatCompletionRequestToolChoice - - -## Supported Types - -### `models.ToolChoice` - -```python -value: models.ToolChoice = /* values here */ -``` - -### `models.ToolChoiceEnum` - -```python -value: models.ToolChoiceEnum = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md deleted file mode 100644 index ad376158..00000000 --- a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md +++ /dev/null @@ -1,13 +0,0 @@ -# ChatCompletionResponse - - -## Fields - -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------- | 
---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | -| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | -| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | -| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md deleted file mode 100644 index 827943cd..00000000 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ /dev/null @@ -1,23 +0,0 @@ -# ChatCompletionStreamRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md deleted file mode 100644 index cce0ca3e..00000000 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md +++ /dev/null @@ -1,17 +0,0 @@ -# ChatCompletionStreamRequestToolChoice - - -## Supported Types - -### `models.ToolChoice` - -```python -value: models.ToolChoice = /* values here */ -``` - -### `models.ToolChoiceEnum` - -```python -value: models.ToolChoiceEnum = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md deleted file mode 100644 index b8ae6a09..00000000 --- a/packages/mistralai_gcp/docs/models/completionchunk.md +++ /dev/null @@ -1,13 +0,0 @@ -# CompletionChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md deleted file mode 100644 index c807dacd..00000000 --- a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionResponseStreamChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | -| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | -| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/mistralai_gcp/docs/models/content.md deleted file mode 100644 index a833dc2c..00000000 --- a/packages/mistralai_gcp/docs/models/content.md +++ /dev/null @@ -1,17 +0,0 @@ -# Content - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[models.ContentChunk]` - -```python -value: 
List[models.ContentChunk] = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md deleted file mode 100644 index 61deabbf..00000000 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeltaMessage - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/finishreason.md b/packages/mistralai_gcp/docs/models/finishreason.md deleted file mode 100644 index 45a5aedb..00000000 --- a/packages/mistralai_gcp/docs/models/finishreason.md +++ /dev/null @@ -1,11 +0,0 @@ -# FinishReason - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `STOP` | stop | -| `LENGTH` | length | -| `ERROR` | error | -| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md deleted file mode 100644 index a166b7bb..00000000 --- a/packages/mistralai_gcp/docs/models/function.md +++ /dev/null @@ -1,11 +0,0 @@ -# Function - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurl.md b/packages/mistralai_gcp/docs/models/imageurl.md deleted file mode 100644 index 7c2bcbc3..00000000 --- a/packages/mistralai_gcp/docs/models/imageurl.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImageURL - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunk.md b/packages/mistralai_gcp/docs/models/imageurlchunk.md deleted file mode 100644 index f1b926ef..00000000 --- a/packages/mistralai_gcp/docs/models/imageurlchunk.md +++ /dev/null @@ -1,11 +0,0 @@ -# ImageURLChunk - -{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md b/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md deleted file mode 100644 index 76738908..00000000 --- a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md +++ /dev/null @@ -1,17 +0,0 @@ -# ImageURLChunkImageURL - - -## Supported Types - -### `models.ImageURL` - -```python -value: models.ImageURL = /* values here */ -``` - -### `str` - 
-```python -value: str = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/imageurlchunktype.md b/packages/mistralai_gcp/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/packages/mistralai_gcp/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md deleted file mode 100644 index ae387867..00000000 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ /dev/null @@ -1,11 +0,0 @@ -# JSONSchema - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/messages.md b/packages/mistralai_gcp/docs/models/messages.md deleted file mode 100644 index 1d394500..00000000 --- a/packages/mistralai_gcp/docs/models/messages.md +++ /dev/null @@ -1,29 +0,0 @@ -# Messages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md deleted file mode 100644 index 
86e9c396..00000000 --- a/packages/mistralai_gcp/docs/models/prediction.md +++ /dev/null @@ -1,9 +0,0 @@ -# Prediction - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunk.md b/packages/mistralai_gcp/docs/models/referencechunk.md deleted file mode 100644 index a132ca2f..00000000 --- a/packages/mistralai_gcp/docs/models/referencechunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReferenceChunk - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunktype.md b/packages/mistralai_gcp/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/packages/mistralai_gcp/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md deleted file mode 100644 index 23a1641b..00000000 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ /dev/null @@ -1,9 +0,0 @@ -# 
ResponseFormat - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md deleted file mode 100644 index 06886afe..00000000 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ /dev/null @@ -1,12 +0,0 @@ -# ResponseFormats - -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. - - -## Values - -| Name | Value | -| ------------- | ------------- | -| `TEXT` | text | -| `JSON_OBJECT` | json_object | -| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/role.md b/packages/mistralai_gcp/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/packages/mistralai_gcp/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/stop.md b/packages/mistralai_gcp/docs/models/stop.md deleted file mode 100644 index ba40ca83..00000000 --- a/packages/mistralai_gcp/docs/models/stop.md +++ /dev/null @@ -1,19 +0,0 @@ -# Stop - -Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[str]` - -```python -value: List[str] = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/mistralai_gcp/docs/models/systemmessage.md deleted file mode 100644 index 0dba71c0..00000000 --- a/packages/mistralai_gcp/docs/models/systemmessage.md +++ /dev/null @@ -1,9 +0,0 @@ -# SystemMessage - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/mistralai_gcp/docs/models/systemmessagecontent.md deleted file mode 100644 index e0d27d9f..00000000 --- a/packages/mistralai_gcp/docs/models/systemmessagecontent.md +++ /dev/null @@ -1,17 +0,0 @@ -# SystemMessageContent - - -## Supported Types - -### `str` - -```python -value: str = /* values here */ -``` - -### `List[models.TextChunk]` - -```python -value: List[models.TextChunk] = /* values here */ -``` - diff --git a/packages/mistralai_gcp/docs/models/textchunk.md b/packages/mistralai_gcp/docs/models/textchunk.md deleted file mode 100644 index 6daab3c3..00000000 --- a/packages/mistralai_gcp/docs/models/textchunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# TextChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ 
| ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md deleted file mode 100644 index 822f86f8..00000000 --- a/packages/mistralai_gcp/docs/models/tool.md +++ /dev/null @@ -1,9 +0,0 @@ -# Tool - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md deleted file mode 100644 index 574be1ea..00000000 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ /dev/null @@ -1,11 +0,0 @@ -# ToolCall - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md deleted file mode 100644 index 792ebcd6..00000000 --- 
a/packages/mistralai_gcp/docs/models/toolchoice.md +++ /dev/null @@ -1,11 +0,0 @@ -# ToolChoice - -ToolChoice is either a ToolChoiceEnum or a ToolChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessage.md b/packages/mistralai_gcp/docs/models/toolmessage.md deleted file mode 100644 index a54f4933..00000000 --- a/packages/mistralai_gcp/docs/models/toolmessage.md +++ /dev/null @@ -1,11 +0,0 @@ -# ToolMessage - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | -| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagerole.md b/packages/mistralai_gcp/docs/models/toolmessagerole.md deleted file mode 100644 index 
c24e59c0..00000000 --- a/packages/mistralai_gcp/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/type.md b/packages/mistralai_gcp/docs/models/type.md deleted file mode 100644 index eb0581e7..00000000 --- a/packages/mistralai_gcp/docs/models/type.md +++ /dev/null @@ -1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md deleted file mode 100644 index 9f56a3ae..00000000 --- a/packages/mistralai_gcp/docs/models/usageinfo.md +++ /dev/null @@ -1,10 +0,0 @@ -# UsageInfo - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/mistralai_gcp/docs/models/usermessage.md deleted file mode 100644 index 63b01310..00000000 --- a/packages/mistralai_gcp/docs/models/usermessage.md +++ /dev/null @@ -1,9 +0,0 @@ -# UserMessage - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | 
[Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagerole.md b/packages/mistralai_gcp/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/packages/mistralai_gcp/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/sdks/chat/README.md b/packages/mistralai_gcp/docs/sdks/chat/README.md deleted file mode 100644 index 6f5f1977..00000000 --- a/packages/mistralai_gcp/docs/sdks/chat/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# Chat -(*chat*) - -## Overview - -Chat Completion API. - -### Available Operations - -* [stream](#stream) - Stream chat completion -* [create](#create) - Chat Completion - -## stream - -Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - -### Example Usage - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -### Parameters - -| Parameter | Type | Required | Description | Example | -| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - - -### Response - -**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** -### Errors - -| Error Object | Status Code | Content Type | -| --------------- | ----------- | ------------ | -| models.SDKError | 4xx-5xx | */* | - -## create - -Chat Completion - -### Example Usage - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - # handle response - pass - -``` - -### Parameters - -| Parameter | Type | Required | Description | Example | -| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - - -### Response - -**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** -### Errors - -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/docs/sdks/fim/README.md b/packages/mistralai_gcp/docs/sdks/fim/README.md deleted file mode 100644 index b997fabf..00000000 --- a/packages/mistralai_gcp/docs/sdks/fim/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Fim -(*fim*) - -## Overview - -Fill-in-the-middle API. - -### Available Operations - -* [stream](#stream) - Stream fim completion -* [create](#create) - Fim Completion - -## stream - -Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
- -### Example Usage - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.fim.stream(prompt="def", model="codestral-2405", suffix="return a+b") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - -### Parameters - -| Parameter | Type | Required | Description | Example | -| ------------- | ------------------------------------------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | - - -### Response - -**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** -### Errors - -| Error Object | Status Code | Content Type | -| --------------- | ----------- | ------------ | -| models.SDKError | 4xx-5xx | */* | - -## create - -FIM completion. - -### Example Usage - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.fim.complete(prompt="def", model="codestral-2405", suffix="return a+b") - -if res is not None: - # handle response - pass - -``` - -### Parameters - -| Parameter | Type | Required | Description | Example | -| ------------- | ------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - - -### Response - -**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** -### Errors - -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock deleted file mode 100644 index 5f710a98..00000000 --- a/packages/mistralai_gcp/poetry.lock +++ /dev/null @@ -1,900 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. - -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[[package]] -name = "anyio" -version = "4.4.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions 
= {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] -trio = ["trio (>=0.23)"] - -[[package]] -name = "astroid" -version = "3.2.4" -description = "An abstract syntax tree for Python with inference support." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "cachetools" -version = "5.4.0" -description = "Extensible memoizing collections and decorators" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, - {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, -] - -[[package]] -name = "certifi" -version = "2024.7.4" -description = "Python package for providing Mozilla's CA Bundle." 
-optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7.0" -groups = ["main"] -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = 
"charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "dill" -version = "0.3.8" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "eval-type-backport" -version = "0.2.0" -description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, - {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "google-auth" -version = "2.38.0" -description = "Google Authentication Library" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, - {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = ">=3.1.4,<5" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography", "pyopenssl"] -pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0.dev0)"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -groups = 
["main"] -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "1.0.5" -description = "A minimal low-level HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] - -[[package]] -name = "httpx" -version = "0.28.1" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" - -[package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "idna" -version = "3.7" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -groups = ["main"] -files = [ - {file = "idna-3.7-py3-none-any.whl", hash = 
"sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mypy" -version = "1.14.1" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = 
"mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, -] - -[package.dependencies] -mypy_extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -faster-cache = ["orjson"] -install-types = ["pip"] -mypyc = ["setuptools 
(>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -groups = ["dev"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "packaging" -version = "24.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, -] - -[[package]] -name = "platformdirs" -version = "4.2.2" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, -] - -[package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pyasn1" -version = "0.6.0" -description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.0" -description = "A collection of ASN.1-based protocols modules" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = 
"sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, -] - -[package.dependencies] -pyasn1 = ">=0.4.6,<0.7.0" - -[[package]] -name = "pydantic" -version = "2.10.6" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, -] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" -typing-extensions = ">=4.12.2" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] - -[[package]] -name = "pydantic-core" -version = "2.27.2" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = 
"pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = 
"sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = 
"sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pylint" -version = "3.2.3" -description = "python code static checker" -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = 
"sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, -] - -[package.dependencies] -astroid = ">=3.2.2,<=3.3.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, -] -isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pytest" -version = "8.3.2" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.23.8" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = 
"pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, -] - -[package.dependencies] -pytest = ">=7.0.0,<9" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "requests" -version = "2.32.3" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rsa" -version = "4.2" -description = "Pure-Python RSA implementation" -optional = false -python-versions = "*" -groups = ["main"] -markers = "python_version >= \"3.12\"" -files = [ - {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "rsa" -version = "4.9" -description = "Pure-Python RSA implementation" -optional = false -python-versions = ">=3.6,<4" -groups 
= ["main"] -markers = "python_version <= \"3.11\"" -files = [ - {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, - {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.0" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = 
"tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, -] - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20240316" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, - {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -] - -[package.dependencies] -typing-extensions = ">=4.12.0" - -[[package]] -name = "urllib3" -version = "2.2.2" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[metadata] -lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "e393da36a5d5edb020e739f40ff611854b9940e11a34a4e221f3f1513efeb9db" diff --git a/packages/mistralai_gcp/poetry.toml b/packages/mistralai_gcp/poetry.toml deleted file mode 100644 index ab1033bd..00000000 --- a/packages/mistralai_gcp/poetry.toml +++ /dev/null @@ -1,2 +0,0 @@ -[virtualenvs] -in-project = true diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc deleted file mode 100644 index 266bc815..00000000 --- a/packages/mistralai_gcp/pylintrc +++ /dev/null @@ -1,662 +0,0 @@ -[MAIN] - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Clear in-memory caches upon conclusion of linting. Useful if running pylint -# in a server-like mode. -clear-cache-post-run=no - -# Load and enable all available extensions. Use --list-extensions to see a list -# all available extensions. -#enable-all-extensions= - -# In error mode, messages with a category besides ERROR or FATAL are -# suppressed, and no reports are done by default. Error mode is compatible with -# disabling specific errors. -#errors-only= - -# Always return a 0 (non-error) status code, even if lint errors are found. 
-# This is primarily useful in continuous integration scripts. -#exit-zero= - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. -extension-pkg-allow-list= - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. (This is an alternative name to extension-pkg-allow-list -# for backward compatibility.) -extension-pkg-whitelist= - -# Return non-zero exit code if any of these messages/categories are detected, -# even if score is above --fail-under value. Syntax same as enable. Messages -# specified are enabled, while categories only check already-enabled messages. -fail-on= - -# Specify a score threshold under which the program will exit with error. -fail-under=10 - -# Interpret the stdin as a python script, whose filename needs to be passed as -# the module_or_package argument. -#from-stdin= - -# Files or directories to be skipped. They should be base names, not paths. -ignore=CVS - -# Add files or directories matching the regular expressions patterns to the -# ignore-list. The regex matches against paths and can be in Posix or Windows -# format. Because '\\' represents the directory delimiter on Windows systems, -# it can't be used as an escape character. -ignore-paths= - -# Files or directories matching the regular expression patterns are skipped. -# The regex matches against base names, not paths. The default value ignores -# Emacs file locks -ignore-patterns=^\.# - -# List of module names for which member attributes should not be checked and -# will not be imported (useful for modules/projects where namespaces are -# manipulated during runtime and thus existing member attributes cannot be -# deduced by static analysis). It supports qualified module names, as well as -# Unix pattern matching. 
-ignored-modules= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the -# number of processors available to use, and will cap the count on Windows to -# avoid hangs. -jobs=1 - -# Control the amount of potential inferred values when inferring a single -# object. This can help the performance when dealing with large functions or -# complex, nested conditions. -limit-inference-results=100 - -# List of plugins (as comma separated values of python module names) to load, -# usually to register additional checkers. -load-plugins= - -# Pickle collected data for later comparisons. -persistent=yes - -# Minimum Python version to use for version dependent checks. Will default to -# the version used to run pylint. -py-version=3.9 - -# Discover python modules and packages in the file system subtree. -recursive=no - -# Add paths to the list of the source roots. Supports globbing patterns. The -# source root is an absolute path or a path relative to the current working -# directory used to determine a package namespace for modules located under the -# source root. -source-roots=src - -# When enabled, pylint would attempt to guess common misconfiguration and emit -# user-friendly hints instead of false-positive error messages. -suggestion-mode=yes - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# In verbose mode, extra non-checker-related info will be displayed. -#verbose= - - -[BASIC] - -# Naming style matching correct argument names. -argument-naming-style=snake_case - -# Regular expression matching correct argument names. Overrides argument- -# naming-style. If left empty, argument names will be checked with the set -# naming style. -#argument-rgx= - -# Naming style matching correct attribute names. 
-#attr-naming-style=snake_case - -# Regular expression matching correct attribute names. Overrides attr-naming- -# style. If left empty, attribute names will be checked with the set naming -# style. -attr-rgx=[^\W\d][^\W]*|__.*__$ - -# Bad variable names which should always be refused, separated by a comma. -bad-names= - -# Bad variable names regexes, separated by a comma. If names match any regex, -# they will always be refused -bad-names-rgxs= - -# Naming style matching correct class attribute names. -class-attribute-naming-style=any - -# Regular expression matching correct class attribute names. Overrides class- -# attribute-naming-style. If left empty, class attribute names will be checked -# with the set naming style. -#class-attribute-rgx= - -# Naming style matching correct class constant names. -class-const-naming-style=UPPER_CASE - -# Regular expression matching correct class constant names. Overrides class- -# const-naming-style. If left empty, class constant names will be checked with -# the set naming style. -#class-const-rgx= - -# Naming style matching correct class names. -class-naming-style=PascalCase - -# Regular expression matching correct class names. Overrides class-naming- -# style. If left empty, class names will be checked with the set naming style. -#class-rgx= - -# Naming style matching correct constant names. -const-naming-style=UPPER_CASE - -# Regular expression matching correct constant names. Overrides const-naming- -# style. If left empty, constant names will be checked with the set naming -# style. -#const-rgx= - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Naming style matching correct function names. -function-naming-style=snake_case - -# Regular expression matching correct function names. Overrides function- -# naming-style. If left empty, function names will be checked with the set -# naming style. 
-#function-rgx= - -# Good variable names which should always be accepted, separated by a comma. -good-names=i, - j, - k, - ex, - Run, - _, - e, - n, - id - -# Good variable names regexes, separated by a comma. If names match any regex, -# they will always be accepted -good-names-rgxs= - -# Include a hint for the correct naming format with invalid-name. -include-naming-hint=no - -# Naming style matching correct inline iteration names. -inlinevar-naming-style=any - -# Regular expression matching correct inline iteration names. Overrides -# inlinevar-naming-style. If left empty, inline iteration names will be checked -# with the set naming style. -#inlinevar-rgx= - -# Naming style matching correct method names. -method-naming-style=snake_case - -# Regular expression matching correct method names. Overrides method-naming- -# style. If left empty, method names will be checked with the set naming style. -#method-rgx= - -# Naming style matching correct module names. -module-naming-style=snake_case - -# Regular expression matching correct module names. Overrides module-naming- -# style. If left empty, module names will be checked with the set naming style. -#module-rgx= - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -# These decorators are taken in consideration only for invalid-name. -property-classes=abc.abstractproperty - -# Regular expression matching correct type alias names. If left empty, type -# alias names will be checked with the set naming style. -typealias-rgx=.* - -# Regular expression matching correct type variable names. 
If left empty, type -# variable names will be checked with the set naming style. -#typevar-rgx= - -# Naming style matching correct variable names. -variable-naming-style=snake_case - -# Regular expression matching correct variable names. Overrides variable- -# naming-style. If left empty, variable names will be checked with the set -# naming style. -#variable-rgx= - - -[CLASSES] - -# Warn about protected attribute access inside special methods -check-protected-access-in-special-methods=no - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__, - __new__, - setUp, - asyncSetUp, - __post_init__ - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[DESIGN] - -# List of regular expressions of class ancestor names to ignore when counting -# public methods (see R0903) -exclude-too-few-public-methods= - -# List of qualified class names to ignore when counting class parents (see -# R0901) -ignored-parents= - -# Maximum number of arguments for function / method. -max-args=5 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Maximum number of boolean expressions in an if statement (see R0916). -max-bool-expr=5 - -# Maximum number of branch for function / method body. -max-branches=12 - -# Maximum number of locals for function / method body. -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=25 - -# Maximum number of return / yield for function / method body. -max-returns=6 - -# Maximum number of statements in function / method body. 
-max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when caught. -overgeneral-exceptions=builtins.BaseException,builtins.Exception - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. -max-line-length=100 - -# Maximum number of lines in a module. -max-module-lines=1000 - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - - -[IMPORTS] - -# List of modules that can be imported at any level, not just the top level -# one. -allow-any-import-level= - -# Allow explicit reexports by alias from a package __init__. -allow-reexport-from-package=no - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Deprecated modules which should not be used, separated by a comma. -deprecated-modules= - -# Output a graph (.gv or any supported image format) of external dependencies -# to the given file (report RP0402 must not be disabled). -ext-import-graph= - -# Output a graph (.gv or any supported image format) of all (i.e. internal and -# external) dependencies to the given file (report RP0402 must not be -# disabled). -import-graph= - -# Output a graph (.gv or any supported image format) of internal dependencies -# to the given file (report RP0402 must not be disabled). 
-int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Couples of modules and preferred modules, separated by a comma. -preferred-modules= - - -[LOGGING] - -# The type of string formatting that logging methods do. `old` means using % -# formatting, `new` is for `{}` formatting. -logging-format-style=old - -# Logging modules to check that the string format arguments are in logging -# function parameter format. -logging-modules=logging - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, -# UNDEFINED. -confidence=HIGH, - CONTROL_FLOW, - INFERENCE, - INFERENCE_FAILURE, - UNDEFINED - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then re-enable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use "--disable=all --enable=classes -# --disable=W". 
-disable=raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, - suppressed-message, - useless-suppression, - deprecated-pragma, - use-implicit-booleaness-not-comparison-to-string, - use-implicit-booleaness-not-comparison-to-zero, - use-symbolic-message-instead, - trailing-whitespace, - line-too-long, - missing-class-docstring, - missing-module-docstring, - missing-function-docstring, - too-many-instance-attributes, - wrong-import-order, - too-many-arguments, - broad-exception-raised, - too-few-public-methods, - too-many-branches, - duplicate-code, - trailing-newlines, - too-many-public-methods, - too-many-locals, - too-many-lines, - using-constant-test, - too-many-statements, - cyclic-import, - too-many-nested-blocks, - too-many-boolean-expressions, - no-else-raise, - bare-except, - broad-exception-caught, - fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable= - - -[METHOD_ARGS] - -# List of qualified names (i.e., library.method) which require a timeout -# parameter e.g. 'requests.api.get,requests.api.post' -timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME, - XXX, - TODO - -# Regular expression of note tags to take in consideration. -notes-rgx= - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - -# Complete name of functions that never returns. 
When checking for -# inconsistent-return-statements if a never returning function is called then -# it will be considered as an explicit return statement and no message will be -# printed. -never-returning-functions=sys.exit,argparse.parse_error - - -[REPORTS] - -# Python expression which should return a score less than or equal to 10. You -# have access to the variables 'fatal', 'error', 'warning', 'refactor', -# 'convention', and 'info' which contain the number of messages in each -# category, as well as 'statement' which is the total number of statements -# analyzed. This score is used by the global evaluation report (RP0004). -evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details. -msg-template= - -# Set the output format. Available formats are: text, parseable, colorized, -# json2 (improved json format), json (old json format) and msvs (visual -# studio). You can also give a reporter class, e.g. -# mypackage.mymodule.MyReporterClass. -#output-format= - -# Tells whether to display a full report or only the messages. -reports=no - -# Activate the evaluation score. -score=yes - - -[SIMILARITIES] - -# Comments are removed from the similarity computation -ignore-comments=yes - -# Docstrings are removed from the similarity computation -ignore-docstrings=yes - -# Imports are removed from the similarity computation -ignore-imports=yes - -# Signatures are removed from the similarity computation -ignore-signatures=yes - -# Minimum lines number of a similarity. -min-similarity-lines=4 - - -[SPELLING] - -# Limits count of emitted suggestions for spelling mistakes. -max-spelling-suggestions=4 - -# Spelling dictionary name. No available dictionaries : You need to install -# both the python package and the system dependency for enchant to work. 
-spelling-dict= - -# List of comma separated words that should be considered directives if they -# appear at the beginning of a comment and should not be checked. -spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains the private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to the private dictionary (see the -# --spelling-private-dict-file option) instead of raising a message. -spelling-store-unknown-words=no - - -[STRING] - -# This flag controls whether inconsistent-quotes generates a warning when the -# character used as a quote delimiter is used inconsistently within a module. -check-quote-consistency=no - -# This flag controls whether the implicit-str-concat should generate a warning -# on implicit string concatenation in sequences defined over several lines. -check-str-concat-over-line-jumps=no - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - -# Tells whether to warn about missing members when the owner of the attribute -# is inferred to be None. -ignore-none=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. 
In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of symbolic message names to ignore for Mixin members. -ignored-checks-for-mixins=no-member, - not-async-context-manager, - not-context-manager, - attribute-defined-outside-init - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace - -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - -# Regex pattern to define which classes are considered mixins. -mixin-class-rgx=.*[Mm]ixin - -# List of decorators that change the signature of a decorated function. -signature-mutators= - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid defining new builtins when possible. -additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of names allowed to shadow builtins -allowed-redefined-builtins=id,object - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_, - _cb - -# A regular expression matching the name of dummy variables (i.e. expected to -# not be used). 
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ - -# Argument names that match this expression will be ignored. -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml deleted file mode 100644 index 9763e417..00000000 --- a/packages/mistralai_gcp/pyproject.toml +++ /dev/null @@ -1,60 +0,0 @@ -[project] -name = "mistralai-gcp" -version = "1.6.0" -description = "Python Client SDK for the Mistral AI API in GCP." -authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "google-auth (>=2.31.0,<3.0.0)", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "requests (>=2.32.3,<3.0.0)", - "typing-inspection >=0.4.0", -] - -[tool.poetry] -packages = [ - { include = "mistralai_gcp", from = "src" } -] -include = ["py.typed", "src/mistralai_gcp/py.typed"] - -[tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_gcp/py.typed"] - -[virtualenvs] -in-project = true - -[tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - -[tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" -pythonpath = ["src"] - -[tool.mypy] -disable_error_code = "misc" - -[[tool.mypy.overrides]] -module = "typing_inspect" -ignore_missing_imports = true - -[[tool.mypy.overrides]] -module = "jsonpath" -ignore_missing_imports = true - -[tool.pyright] -venvPath = "." 
-venv = ".venv" - - diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py deleted file mode 100644 index 825d9ded..00000000 --- a/packages/mistralai_gcp/scripts/prepare_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import shutil - -try: - shutil.copyfile("README.md", "README-PYPI.md") -except Exception as e: - print("Failed to copy README.md to README-PYPI.md") - print(e) diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh deleted file mode 100755 index f2f2cf2c..00000000 --- a/packages/mistralai_gcp/scripts/publish.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} - -poetry run python scripts/prepare_readme.py - -poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py deleted file mode 100644 index dd02e42e..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) -from .sdk import * -from .sdkconfiguration import * -from .models import * - - -VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py deleted file mode 100644 index 77df6aef..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py +++ /dev/null @@ -1,22 +0,0 @@ -# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py -from typing import Union - -import httpx - -from .types import BeforeRequestContext, BeforeRequestHook - -PREFIX = "mistral-client-python/" - -class CustomUserAgentHook(BeforeRequestHook): - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - current = request.headers["user-agent"] - if current.startswith(PREFIX): - return request - - request.headers["user-agent"] = ( - PREFIX + current.split(" ")[1] - ) - - return request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py deleted file mode 100644 index 304edfa2..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py +++ /dev/null @@ -1,15 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. 
- - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py deleted file mode 100644 index b81c2a27..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import httpx -from .types import ( - SDKInitHook, - BeforeRequestContext, - BeforeRequestHook, - AfterSuccessContext, - AfterSuccessHook, - AfterErrorContext, - AfterErrorHook, - Hooks, -) -from .registration import init_hooks -from typing import List, Optional, Tuple -from mistralai_gcp.httpclient import HttpClient - - -class SDKHooks(Hooks): - def __init__(self) -> None: - self.sdk_init_hooks: List[SDKInitHook] = [] - self.before_request_hooks: List[BeforeRequestHook] = [] - self.after_success_hooks: List[AfterSuccessHook] = [] - self.after_error_hooks: List[AfterErrorHook] = [] - init_hooks(self) - - def register_sdk_init_hook(self, hook: SDKInitHook) -> None: - self.sdk_init_hooks.append(hook) - - def register_before_request_hook(self, hook: BeforeRequestHook) -> None: - self.before_request_hooks.append(hook) - - def register_after_success_hook(self, hook: AfterSuccessHook) -> None: - self.after_success_hooks.append(hook) - - def register_after_error_hook(self, hook: AfterErrorHook) -> None: - self.after_error_hooks.append(hook) - - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - for hook in self.sdk_init_hooks: - base_url, client = 
hook.sdk_init(base_url, client) - return base_url, client - - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> httpx.Request: - for hook in self.before_request_hooks: - out = hook.before_request(hook_ctx, request) - if isinstance(out, Exception): - raise out - request = out - - return request - - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> httpx.Response: - for hook in self.after_success_hooks: - out = hook.after_success(hook_ctx, response) - if isinstance(out, Exception): - raise out - response = out - return response - - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: - for hook in self.after_error_hooks: - result = hook.after_error(hook_ctx, response, error) - if isinstance(result, Exception): - raise result - response, error = result - return response, error diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py deleted file mode 100644 index bb867b5b..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from abc import ABC, abstractmethod -import httpx -from mistralai_gcp.httpclient import HttpClient -from typing import Any, Callable, List, Optional, Tuple, Union - - -class HookContext: - base_url: str - operation_id: str - oauth2_scopes: Optional[List[str]] = None - security_source: Optional[Union[Any, Callable[[], Any]]] = None - - def __init__( - self, - base_url: str, - operation_id: str, - oauth2_scopes: Optional[List[str]], - security_source: Optional[Union[Any, Callable[[], Any]]], - ): - self.base_url = base_url - self.operation_id = operation_id - self.oauth2_scopes = oauth2_scopes - self.security_source = security_source - - -class BeforeRequestContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterSuccessContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterErrorContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class SDKInitHook(ABC): - @abstractmethod - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - pass - - -class BeforeRequestHook(ABC): - @abstractmethod - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - pass - - -class AfterSuccessHook(ABC): - @abstractmethod - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> Union[httpx.Response, Exception]: - pass - - -class AfterErrorHook(ABC): - @abstractmethod - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> 
Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: - pass - - -class Hooks(ABC): - @abstractmethod - def register_sdk_init_hook(self, hook: SDKInitHook): - pass - - @abstractmethod - def register_before_request_hook(self, hook: BeforeRequestHook): - pass - - @abstractmethod - def register_after_success_hook(self, hook: AfterSuccessHook): - pass - - @abstractmethod - def register_after_error_hook(self, hook: AfterErrorHook): - pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py deleted file mode 100644 index 11f38b63..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import importlib.metadata - -__title__: str = "mistralai-gcp" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp" - -try: - if __package__ is not None: - __version__ = importlib.metadata.version(__package__) -except importlib.metadata.PackageNotFoundError: - pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py deleted file mode 100644 index bb0aab96..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ /dev/null @@ -1,362 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .sdkconfiguration import SDKConfiguration -import httpx -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import ( - AfterErrorContext, - AfterSuccessContext, - BeforeRequestContext, -) -from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Mapping, Optional, Tuple -from urllib.parse import parse_qs, urlparse - - -class BaseSDK: - sdk_configuration: SDKConfiguration - - def __init__(self, sdk_config: SDKConfiguration) -> None: - self.sdk_configuration = sdk_config - - def _get_url(self, base_url, url_variables): - sdk_url, sdk_variables = self.sdk_configuration.get_server_details() - - if base_url is None: - base_url = sdk_url - - if url_variables is None: - url_variables = sdk_variables - - return utils.template_url(base_url, url_variables) - - def _build_request_async( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.async_client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - ) - - def _build_request( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: 
Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - ) - - def _build_request_with_client( - self, - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - query_params = {} - - url = url_override - if url is None: - url = utils.generate_url( - self._get_url(base_url, url_variables), - path, - request if request_has_path_params else None, - _globals if request_has_path_params else None, - ) - - query_params = utils.get_query_params( - request if request_has_query_params else None, - _globals if request_has_query_params else None, - ) - else: - # Pick up the query parameter from the override so they can be - # preserved when building the request later on (necessary as of - # httpx 0.28). 
- parsed_override = urlparse(str(url_override)) - query_params = parse_qs(parsed_override.query, keep_blank_values=True) - - headers = utils.get_headers(request, _globals) - headers["Accept"] = accept_header_value - headers[user_agent_header] = self.sdk_configuration.user_agent - - if security is not None: - if callable(security): - security = security() - - if security is not None: - security_headers, security_query_params = utils.get_security(security) - headers = {**headers, **security_headers} - query_params = {**query_params, **security_query_params} - - serialized_request_body = SerializedRequestBody() - if get_serialized_body is not None: - rb = get_serialized_body() - if request_body_required and rb is None: - raise ValueError("request body is required") - - if rb is not None: - serialized_request_body = rb - - if ( - serialized_request_body.media_type is not None - and serialized_request_body.media_type - not in ( - "multipart/form-data", - "multipart/mixed", - ) - ): - headers["content-type"] = serialized_request_body.media_type - - if http_headers is not None: - for header, value in http_headers.items(): - headers[header] = value - - timeout = timeout_ms / 1000 if timeout_ms is not None else None - - return client.build_request( - method, - url, - params=query_params, - content=serialized_request_body.content, - data=serialized_request_body.data, - files=serialized_request_body.files, - headers=headers, - timeout=timeout, - ) - - def do_request( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.client - logger = self.sdk_configuration.debug_logger - - def do(): - http_res = None - try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - 
get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = client.send(req, stream=stream) - except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") - - return http_res - - if retry_config is not None: - http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) - else: - http_res = do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) - - return http_res - - async def do_request_async( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.async_client - logger = self.sdk_configuration.debug_logger - - async def do(): - http_res = None - try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - 
get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = await client.send(req, stream=stream) - except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") - - return http_res - - if retry_config is not None: - http_res = await utils.retry_async( - do, utils.Retries(retry_config[0], retry_config[1]) - ) - else: - http_res = await do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) - - return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py deleted file mode 100644 index dba369bf..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ /dev/null @@ -1,692 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET -from mistralai_gcp.utils import eventstreaming -from typing import Any, List, Mapping, Optional, Union - - -class Chat(BaseSDK): - r"""Chat Completion API.""" - - def stream( - self, - *, - model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - ) - - req = self._build_request( - method="POST", - path="/streamRawPredict", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - 
request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def stream_async( - self, - *, - model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - ) - - req = self._build_request_async( - method="POST", - path="/streamRawPredict", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - 
security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def 
complete( - self, - *, - model: str, - messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.ChatCompletionRequestStop, - models.ChatCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. 
- :param prediction: - :param parallel_tool_calls: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - ) - - req = self._build_request( - method="POST", - path="/rawPredict", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config 
is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def complete_async( - self, - *, - model: str, - messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.ChatCompletionRequestStop, - 
models.ChatCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
- :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - ) - - req = self._build_request_async( - method="POST", - path="/rawPredict", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - 
oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py deleted file mode 100644 index 84821c6a..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ /dev/null @@ -1,556 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET -from mistralai_gcp.utils import eventstreaming -from typing import Any, Mapping, Optional, Union - - -class Fim(BaseSDK): - r"""Fill-in-the-middle API.""" - - def stream( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models.FIMCompletionStreamRequestStop, - models.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/streamRawPredict#fim", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - 
http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def stream_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models.FIMCompletionStreamRequestStop, - models.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/streamRawPredict#fim", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data 
= utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def complete( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.FIMCompletionRequestStop, - models.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/rawPredict#fim", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if 
utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def complete_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.FIMCompletionRequestStop, - models.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
- :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/rawPredict#fim", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], - security_source=self.sdk_configuration.security, - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise 
models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py deleted file mode 100644 index 1e426352..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -# pyright: reportReturnType = false -import asyncio -from concurrent.futures import ThreadPoolExecutor -from typing_extensions import Protocol, runtime_checkable -import httpx -from typing import Any, Optional, Union - - -@runtime_checkable -class HttpClient(Protocol): - def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - def close(self) -> None: - pass - - -@runtime_checkable -class AsyncHttpClient(Protocol): - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: 
Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - async def aclose(self) -> None: - pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - is_async = False - try: - asyncio.get_running_loop() - is_async = True - except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. 
- if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: - asyncio.run(async_client.aclose()) - except Exception: - pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py deleted file mode 100644 index 752e70e6..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ /dev/null @@ -1,233 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import 
Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, -) -from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, -) -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from 
.usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) - - -__all__ = [ - "Arguments", - "ArgumentsTypedDict", - "AssistantMessage", - "AssistantMessageContent", - "AssistantMessageContentTypedDict", - "AssistantMessageRole", - "AssistantMessageTypedDict", - "ChatCompletionChoice", - "ChatCompletionChoiceFinishReason", - "ChatCompletionChoiceTypedDict", - "ChatCompletionRequest", - "ChatCompletionRequestMessages", - "ChatCompletionRequestMessagesTypedDict", - "ChatCompletionRequestStop", - "ChatCompletionRequestStopTypedDict", - "ChatCompletionRequestToolChoice", - "ChatCompletionRequestToolChoiceTypedDict", - "ChatCompletionRequestTypedDict", - "ChatCompletionResponse", - "ChatCompletionResponseTypedDict", - "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestToolChoice", - "ChatCompletionStreamRequestToolChoiceTypedDict", - "ChatCompletionStreamRequestTypedDict", - "CompletionChunk", - "CompletionChunkTypedDict", - "CompletionEvent", - "CompletionEventTypedDict", - "CompletionResponseStreamChoice", - "CompletionResponseStreamChoiceTypedDict", - "Content", - "ContentChunk", - "ContentChunkTypedDict", - "ContentTypedDict", - "DeltaMessage", - "DeltaMessageTypedDict", - "FIMCompletionRequest", - "FIMCompletionRequestStop", - "FIMCompletionRequestStopTypedDict", - "FIMCompletionRequestTypedDict", - "FIMCompletionResponse", - "FIMCompletionResponseTypedDict", - "FIMCompletionStreamRequest", - "FIMCompletionStreamRequestStop", - "FIMCompletionStreamRequestStopTypedDict", - "FIMCompletionStreamRequestTypedDict", - "FinishReason", - "Function", - "FunctionCall", - "FunctionCallTypedDict", - "FunctionName", - "FunctionNameTypedDict", - "FunctionTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", - "ImageURL", - 
"ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", - "ImageURLChunkTypedDict", - "ImageURLTypedDict", - "JSONSchema", - "JSONSchemaTypedDict", - "Loc", - "LocTypedDict", - "Messages", - "MessagesTypedDict", - "Prediction", - "PredictionTypedDict", - "ReferenceChunk", - "ReferenceChunkType", - "ReferenceChunkTypedDict", - "ResponseFormat", - "ResponseFormatTypedDict", - "ResponseFormats", - "Role", - "SDKError", - "Security", - "SecurityTypedDict", - "Stop", - "StopTypedDict", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentTypedDict", - "SystemMessageTypedDict", - "TextChunk", - "TextChunkTypedDict", - "Tool", - "ToolCall", - "ToolCallTypedDict", - "ToolChoice", - "ToolChoiceEnum", - "ToolChoiceTypedDict", - "ToolMessage", - "ToolMessageContent", - "ToolMessageContentTypedDict", - "ToolMessageRole", - "ToolMessageTypedDict", - "ToolTypedDict", - "ToolTypes", - "Type", - "UsageInfo", - "UsageInfoTypedDict", - "UserMessage", - "UserMessageContent", - "UserMessageContentTypedDict", - "UserMessageRole", - "UserMessageTypedDict", - "ValidationError", - "ValidationErrorTypedDict", -] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py deleted file mode 100644 index 9147f566..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AssistantMessageContentTypedDict = TypeAliasType( - "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -AssistantMessageContent = TypeAliasType( - "AssistantMessageContent", Union[str, List[ContentChunk]] -) - - -AssistantMessageRole = Literal["assistant"] - - -class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[AssistantMessageContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] - - -class AssistantMessage(BaseModel): - content: OptionalNullable[AssistantMessageContent] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - - role: Optional[AssistantMessageRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py deleted file mode 100644 index 9bcf1240..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_gcp.types import BaseModel, UnrecognizedStr -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Literal, Union -from typing_extensions import Annotated, TypedDict - - -ChatCompletionChoiceFinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr -] - - -class ChatCompletionChoiceTypedDict(TypedDict): - index: int - message: AssistantMessageTypedDict - finish_reason: ChatCompletionChoiceFinishReason - - -class ChatCompletionChoice(BaseModel): - index: int - - message: AssistantMessage - - finish_reason: Annotated[ - ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) - ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py deleted file mode 100644 index a0125c35..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ /dev/null @@ -1,188 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from mistralai_gcp.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ChatCompletionRequestStopTypedDict = TypeAliasType( - "ChatCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestStop = TypeAliasType( - "ChatCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -ChatCompletionRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -ChatCompletionRequestToolChoice = TypeAliasType( - "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class ChatCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[ChatCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - - -class ChatCompletionRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[ChatCompletionRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[ChatCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[ChatCompletionRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py deleted file mode 100644 index 0404a9d2..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class ChatCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - - -class ChatCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: Optional[int] = None - - choices: Optional[List[ChatCompletionChoice]] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py deleted file mode 100644 index 656f1d58..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ /dev/null @@ -1,182 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from mistralai_gcp.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = TypeAliasType("Stop", Union[str, List[str]]) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Messages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -ChatCompletionStreamRequestToolChoice = TypeAliasType( - "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class ChatCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[StopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - - -class ChatCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[Messages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[Stop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py deleted file mode 100644 index ca002f52..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionChunkTypedDict(TypedDict): - id: str - model: str - choices: List[CompletionResponseStreamChoiceTypedDict] - object: NotRequired[str] - created: NotRequired[int] - usage: NotRequired[UsageInfoTypedDict] - - -class CompletionChunk(BaseModel): - id: str - - model: str - - choices: List[CompletionResponseStreamChoice] - - object: Optional[str] = None - - created: Optional[int] = None - - usage: Optional[UsageInfo] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py deleted file mode 100644 index 33278c11..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict - - -class CompletionEventTypedDict(TypedDict): - data: CompletionChunkTypedDict - - -class CompletionEvent(BaseModel): - data: CompletionChunk diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py deleted file mode 100644 index 8d779971..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from mistralai_gcp.utils import validate_open_enum -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing import Literal, Union -from typing_extensions import Annotated, TypedDict - - -FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] - - -class CompletionResponseStreamChoiceTypedDict(TypedDict): - index: int - delta: DeltaMessageTypedDict - finish_reason: Nullable[FinishReason] - - -class CompletionResponseStreamChoice(BaseModel): - index: int - - delta: DeltaMessage - - finish_reason: Annotated[ - Nullable[FinishReason], PlainValidator(validate_open_enum(False)) - ] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - 
optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py deleted file mode 100644 index da5671e3..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_gcp.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", - Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], -) - - -ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py deleted file mode 100644 index f9f0868b..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ /dev/null @@ -1,67 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) - - -class DeltaMessageTypedDict(TypedDict): - role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - - -class DeltaMessage(BaseModel): - role: OptionalNullable[str] = UNSET - - content: OptionalNullable[Content] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py deleted file mode 100644 index 6dfb7373..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py 
+++ /dev/null @@ -1,131 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionRequestStopTypedDict = TypeAliasType( - "FIMCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionRequestStop = TypeAliasType( - "FIMCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[FIMCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionRequest(BaseModel): - model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[FIMCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py deleted file mode 100644 index a4d273a2..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class FIMCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - - -class FIMCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: Optional[int] = None - - choices: Optional[List[ChatCompletionChoice]] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py deleted file mode 100644 index 406749bb..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ /dev/null @@ -1,129 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionStreamRequestStopTypedDict = TypeAliasType( - "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionStreamRequestStop = TypeAliasType( - "FIMCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[FIMCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py deleted file mode 100644 index 7ad1ae64..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class FunctionTypedDict(TypedDict): - name: str - parameters: Dict[str, Any] - description: NotRequired[str] - strict: NotRequired[bool] - - -class Function(BaseModel): - name: str - - parameters: Dict[str, Any] - - description: Optional[str] = None - - strict: Optional[bool] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py deleted file mode 100644 index 99554c88..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) - - -Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) - - -class FunctionCallTypedDict(TypedDict): - name: str - arguments: ArgumentsTypedDict - - -class FunctionCall(BaseModel): - name: str - - arguments: Arguments diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py deleted file mode 100644 index 00ec22f5..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict - - -class FunctionNameTypedDict(TypedDict): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str - - -class FunctionName(BaseModel): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py deleted file mode 100644 index 11024f85..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .validationerror import ValidationError -from mistralai_gcp import utils -from mistralai_gcp.types import BaseModel -from typing import List, Optional - - -class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None - - -class HTTPValidationError(Exception): - data: HTTPValidationErrorData - - def __init__(self, data: HTTPValidationErrorData): - self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py deleted file mode 100644 index e7aa11f0..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class ImageURLTypedDict(TypedDict): - url: str - detail: NotRequired[Nullable[str]] - - -class ImageURL(BaseModel): - url: str - - detail: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py deleted file mode 100644 index 1fc0b808..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai_gcp.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url"] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py deleted file mode 100644 index 2529ce31..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py deleted file mode 100644 index 742aac0b..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py deleted file mode 100644 index c4fa3b8b..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference"] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py deleted file mode 100644 index 5a24f644..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .responseformats import ResponseFormats -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ResponseFormatTypedDict(TypedDict): - type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] - - -class ResponseFormat(BaseModel): - type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py deleted file mode 100644 index 08c39951..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py deleted file mode 100644 index 03216cbf..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from dataclasses import dataclass -from typing import Optional -import httpx - - -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" - - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None - - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" - - return f"{self.message}: Status {self.status_code}{body}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py deleted file mode 100644 index 38574942..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import FieldMetadata, SecurityMetadata -from typing_extensions import Annotated, TypedDict - - -class SecurityTypedDict(TypedDict): - api_key: str - - -class Security(BaseModel): - api_key: Annotated[ - str, - FieldMetadata( - security=SecurityMetadata( - scheme=True, - scheme_type="http", - sub_type="bearer", - field_name="Authorization", - ) - ), - ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py deleted file mode 100644 index f14acf12..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] -) - - -SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] -) - - -Role = Literal["system"] - - -class SystemMessageTypedDict(TypedDict): - content: SystemMessageContentTypedDict - role: NotRequired[Role] - - -class SystemMessage(BaseModel): - content: SystemMessageContent - - role: Optional[Role] = "system" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py deleted file mode 100644 index 12f666cd..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["text"] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[Type] - - -class TextChunk(BaseModel): - text: str - - type: Optional[Type] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py deleted file mode 100644 index a1d477da..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[ToolTypes] - - -class Tool(BaseModel): - function: Function - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py deleted file mode 100644 index ecbac8d6..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functioncall import FunctionCall, FunctionCallTypedDict -from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolCallTypedDict(TypedDict): - function: FunctionCallTypedDict - id: NotRequired[str] - type: NotRequired[ToolTypes] - index: NotRequired[int] - - -class ToolCall(BaseModel): - function: FunctionCall - - id: Optional[str] = "null" - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) - - index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py deleted file mode 100644 index dc213e62..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functionname import FunctionName, FunctionNameTypedDict -from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolChoiceTypedDict(TypedDict): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionNameTypedDict - r"""this restriction of `Function` is used to select a specific function to call""" - type: NotRequired[ToolTypes] - - -class ToolChoice(BaseModel): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionName - r"""this restriction of `Function` is used to select a specific function to call""" - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py deleted file mode 100644 index 8e6a6ad8..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ToolChoiceEnum = Literal["auto", "none", "any", "required"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py deleted file mode 100644 index 886b6ff1..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolMessageContentTypedDict = TypeAliasType( - "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) - - -ToolMessageRole = Literal["tool"] - - -class ToolMessageTypedDict(TypedDict): - content: Nullable[ToolMessageContentTypedDict] - tool_call_id: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] - - -class ToolMessage(BaseModel): - content: Nullable[ToolMessageContent] - - tool_call_id: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - role: Optional[ToolMessageRole] = "tool" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py deleted file mode 100644 index 878444c6..00000000 
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import UnrecognizedStr -from typing import Literal, Union - - -ToolTypes = Union[Literal["function"], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py deleted file mode 100644 index 9de6af7e..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict - - -class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class UsageInfo(BaseModel): - prompt_tokens: int - - completion_tokens: int - - total_tokens: int diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py deleted file mode 100644 index 287bb1b4..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -UserMessageContentTypedDict = TypeAliasType( - "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) - - -UserMessageRole = Literal["user"] - - -class UserMessageTypedDict(TypedDict): - content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] - - -class UserMessage(BaseModel): - content: Nullable[UserMessageContent] - - role: Optional[UserMessageRole] = "user" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role"] - nullable_fields = ["content"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py deleted file mode 100644 index 033d4b63..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import List, Union -from typing_extensions import TypeAliasType, TypedDict - - -LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) - - -Loc = TypeAliasType("Loc", Union[str, int]) - - -class ValidationErrorTypedDict(TypedDict): - loc: List[LocTypedDict] - msg: str - type: str - - -class ValidationError(BaseModel): - loc: List[Loc] - - msg: str - - type: str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py deleted file mode 100644 index dd93cc7f..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ /dev/null @@ -1,231 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import json -import weakref -from typing import Any, Optional, cast - -import google.auth -import google.auth.credentials -import google.auth.transport -import google.auth.transport.requests -import httpx - -from mistralai_gcp import models -from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks -from mistralai_gcp.chat import Chat -from mistralai_gcp.fim import Fim -from mistralai_gcp.types import UNSET, OptionalNullable - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig - -LEGACY_MODEL_ID_FORMAT = { - "codestral-2405": "codestral@2405", - "mistral-large-2407": "mistral-large@2407", - "mistral-nemo-2407": "mistral-nemo@2407", -} - - -def get_model_info(model: str) -> tuple[str, str]: - # if the model requiers the legacy fomat, use it, else do nothing. 
- if model in LEGACY_MODEL_ID_FORMAT: - return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] - return model, model - - -class MistralGoogleCloud(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - chat: Chat - r"""Chat Completion API.""" - fim: Fim - r"""Fill-in-the-middle API.""" - - def __init__( - self, - region: str = "europe-west4", - project_id: Optional[str] = None, - access_token: Optional[str] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. 
- - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - - if not access_token: - credentials, loaded_project_id = google.auth.default( - scopes=["https://www.googleapis.com/auth/cloud-platform"], - ) - credentials.refresh(google.auth.transport.requests.Request()) - - if not isinstance(credentials, google.auth.credentials.Credentials): - raise models.SDKError( - "credentials must be an instance of google.auth.credentials.Credentials" - ) - - project_id = project_id or loaded_project_id - - if project_id is None: - raise models.SDKError("project_id must be provided") - - def auth_token() -> str: - if access_token: - return access_token - - credentials.refresh(google.auth.transport.requests.Request()) - token = credentials.token - if not token: - raise models.SDKError("Failed to get token from credentials") - return token - - client_supplied = True - if client is None: - client = httpx.Client() - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." - - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol." 
- - security: Any = None - if callable(auth_token): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=auth_token() - ) - else: - security = models.Security(api_key=auth_token) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=f"https://{region}-aiplatform.googleapis.com", - server=None, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - ) - - hooks = SDKHooks() - hook = GoogleCloudBeforeRequestHook(region, project_id) - hooks.register_before_request_hook(hook) - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - self._init_sdks() - - def _init_sdks(self): - self.chat = Chat(self.sdk_configuration) - self.fim = Fim(self.sdk_configuration) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - 
self.sdk_configuration.async_client = None - - -class GoogleCloudBeforeRequestHook(BeforeRequestHook): - def __init__(self, region: str, project_id: str): - self.region = region - self.project_id = project_id - - def before_request( - self, hook_ctx, request: httpx.Request - ) -> httpx.Request | Exception: - # The goal of this function is to template in the region, project and model into the URL path - # We do this here so that the API remains more user-friendly - model_id = None - new_content = None - if request.content: - parsed = json.loads(request.content.decode("utf-8")) - model_raw = parsed.get("model") - model_name, model_id = get_model_info(model_raw) - parsed["model"] = model_name - new_content = json.dumps(parsed).encode("utf-8") - - if model_id == "": - raise models.SDKError("model must be provided") - - stream = "streamRawPredict" in request.url.path - specifier = "streamRawPredict" if stream else "rawPredict" - url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" - - headers = dict(request.headers) - # Delete content-length header as it will need to be recalculated - headers.pop("content-length", None) - - next_request = httpx.Request( - method=request.method, - url=request.url.copy_with(path=url), - headers=headers, - content=new_content, - stream=None, - ) - - return next_request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py deleted file mode 100644 index c373d27d..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from ._hooks import SDKHooks -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) -from .httpclient import AsyncHttpClient, HttpClient -from .utils import Logger, RetryConfig, remove_suffix -from dataclasses import dataclass -from mistralai_gcp import models -from mistralai_gcp.types import OptionalNullable, UNSET -from pydantic import Field -from typing import Callable, Dict, Optional, Tuple, Union - - -SERVER_EU = "eu" -r"""EU Production server""" -SERVERS = { - SERVER_EU: "https://api.mistral.ai", -} -"""Contains the list of servers available to the SDK""" - - -@dataclass -class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool - debug_logger: Logger - security: Optional[Union[models.Security, Callable[[], models.Security]]] = None - server_url: Optional[str] = "" - server: Optional[str] = "" - language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ - retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) - timeout_ms: Optional[int] = None - - def __post_init__(self): - self._hooks = SDKHooks() - - def get_server_details(self) -> Tuple[str, Dict[str, str]]: - if self.server_url is not None and self.server_url: - return remove_suffix(self.server_url, "/"), {} - if not self.server: - self.server = SERVER_EU - - if self.server not in SERVERS: - raise ValueError(f'Invalid server "{self.server}"') - - return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py deleted file mode 100644 index a6187efa..00000000 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from pydantic import ConfigDict, model_serializer -from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType -from typing_extensions import TypeAliasType, TypeAlias - - -class BaseModel(PydanticBaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() - ) - - -class Unset(BaseModel): - @model_serializer(mode="plain") - def serialize_model(self): - return UNSET_SENTINEL - - def __bool__(self) -> Literal[False]: - return False - - -UNSET = Unset() -UNSET_SENTINEL = "~?~unset~?~sentinel~?~" - - -T = TypeVar("T") -if TYPE_CHECKING: - Nullable: TypeAlias = Union[T, None] - OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] -else: - Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) - OptionalNullable = TypeAliasType( - "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) - ) - -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py deleted file mode 100644 index 3cded8fe..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger - -__all__ = [ - "BackoffStrategy", - "FieldMetadata", - "find_metadata", - "FormMetadata", - "generate_url", - "get_body_content", - "get_default_logger", - "get_discriminator", - "get_global_from_env", - "get_headers", - "get_pydantic_model", - "get_query_params", - "get_response_headers", - "get_security", - "HeaderMetadata", - "Logger", - "marshal_json", - "match_content_type", - "match_status_codes", - "match_response", - "MultipartFormMetadata", - "OpenEnumMeta", - "PathParamMetadata", - "QueryParamMetadata", - "remove_suffix", - "Retries", - "retry", - "retry_async", - "RetryConfig", - "RequestMetadata", - "SecurityMetadata", - "serialize_decimal", - "serialize_float", - "serialize_int", - "serialize_request_body", - "SerializedRequestBody", - "stream_to_text", - "stream_to_text_async", - 
"stream_to_bytes", - "stream_to_bytes_async", - "template_url", - "unmarshal", - "unmarshal_json", - "validate_decimal", - "validate_const", - "validate_float", - "validate_int", - "validate_open_enum", - "cast_partial", -] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py deleted file mode 100644 index 387874ed..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from enum import Enum -from typing import Any, Optional - -def get_discriminator(model: Any, fieldname: str, key: str) -> str: - """ - Recursively search for the discriminator attribute in a model. - - Args: - model (Any): The model to search within. - fieldname (str): The name of the field to search for. - key (str): The key to search for in dictionaries. - - Returns: - str: The name of the discriminator attribute. - - Raises: - ValueError: If the discriminator attribute is not found. 
- """ - upper_fieldname = fieldname.upper() - - def get_field_discriminator(field: Any) -> Optional[str]: - """Search for the discriminator attribute in a given field.""" - - if isinstance(field, dict): - if key in field: - return f'{field[key]}' - - if hasattr(field, fieldname): - attr = getattr(field, fieldname) - if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' - - if hasattr(field, upper_fieldname): - attr = getattr(field, upper_fieldname) - if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' - - return None - - - if isinstance(model, list): - for field in model: - discriminator = get_field_discriminator(field) - if discriminator is not None: - return discriminator - - discriminator = get_field_discriminator(model) - if discriminator is not None: - return discriminator - - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py deleted file mode 100644 index c650b10c..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import enum - - -class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. 
- # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py deleted file mode 100644 index 74a63f75..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py +++ /dev/null @@ -1,238 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import re -import json -from typing import ( - Callable, - Generic, - TypeVar, - Optional, - Generator, - AsyncGenerator, - Tuple, -) -import httpx - -T = TypeVar("T") - - -class EventStream(Generic[T]): - response: httpx.Response - generator: Generator[T, None, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - ): - self.response = response - self.generator = stream_events(response, decoder, sentinel) - - def __iter__(self): - return self - - def __next__(self): - return next(self.generator) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.response.close() - - -class EventStreamAsync(Generic[T]): - response: httpx.Response - generator: AsyncGenerator[T, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - ): - self.response = response - self.generator = stream_events_async(response, decoder, sentinel) - - def __aiter__(self): - return self - - async def __anext__(self): - return await self.generator.__anext__() - - async def __aenter__(self): - return self - - async def 
__aexit__(self, exc_type, exc_val, exc_tb): - await self.response.aclose() - - -class ServerEvent: - id: Optional[str] = None - event: Optional[str] = None - data: Optional[str] = None - retry: Optional[int] = None - - -MESSAGE_BOUNDARIES = [ - b"\r\n\r\n", - b"\n\n", - b"\r\r", -] - - -async def stream_events_async( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> AsyncGenerator[T, None]: - buffer = bytearray() - position = 0 - discard = False - async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def stream_events( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> Generator[T, None, None]: - buffer = bytearray() - position = 0 - discard = False - for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: - block = raw.decode() - lines = re.split(r"\r?\n|\r", block) - publish = False - event = ServerEvent() - data = "" - for line in lines: - if not line: - continue - - delim = line.find(":") - if delim <= 0: - continue - - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] - - if field == "event": - event.event = value - publish = True - elif field == "data": - data += value + "\n" - publish = True - elif field == "id": - event.id = value - publish = True - elif field == "retry": - event.retry = int(value) if value.isdigit() else None - publish = True - - if sentinel and data == f"{sentinel}\n": - return None, True - - if data: - data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass - - out = None - if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False - - -def _peek_sequence(position: int, buffer: bytearray, sequence: 
bytes): - if len(sequence) > (len(buffer) - position): - return None - - for i, seq in enumerate(sequence): - if buffer[position + i] != seq: - return None - - return sequence diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py deleted file mode 100644 index 0472aba8..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ /dev/null @@ -1,202 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .serializers import marshal_json - -from .metadata import ( - FormMetadata, - MultipartFormMetadata, - find_field_metadata, -) -from .values import _is_set, _val_to_string - - -def _populate_form( - field_name: str, - explode: bool, - obj: Any, - delimiter: str, - form: Dict[str, List[str]], -): - if not _is_set(obj): - return form - - if isinstance(obj, BaseModel): - items = [] - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_field_name = obj_field.alias if obj_field.alias is not None else name - if obj_field_name == "": - continue - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - form[obj_field_name] = [_val_to_string(val)] - else: - items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, Dict): - items = [] - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - form[key] = [_val_to_string(value)] - else: - items.append(f"{key}{delimiter}{_val_to_string(value)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, List): - items = [] - - for value in obj: - if not _is_set(value): 
- continue - - if explode: - if not field_name in form: - form[field_name] = [] - form[field_name].append(_val_to_string(value)) - else: - items.append(_val_to_string(value)) - - if len(items) > 0: - form[field_name] = [delimiter.join([str(item) for item in items])] - else: - form[field_name] = [_val_to_string(obj)] - - return form - - -def serialize_multipart_form( - media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: - form: Dict[str, Any] = {} - files: Dict[str, Any] = {} - - if not isinstance(request, BaseModel): - raise TypeError("invalid request body type") - - request_fields: Dict[str, FieldInfo] = request.__class__.model_fields - request_field_types = get_type_hints(request.__class__) - - for name in request_fields: - field = request_fields[name] - - val = getattr(request, name) - if not _is_set(val): - continue - - field_metadata = find_field_metadata(field, MultipartFormMetadata) - if not field_metadata: - continue - - f_name = field.alias if field.alias else name - - if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] - - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue - - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) - else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) - elif field_metadata.json: - files[f_name] = ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ) - else: - if isinstance(val, List): - values = 
[] - - for value in val: - if not _is_set(value): - continue - values.append(_val_to_string(value)) - - form[f_name + "[]"] = values - else: - form[f_name] = _val_to_string(val) - return media_type, form, files - - -def serialize_form_data(data: Any) -> Dict[str, Any]: - form: Dict[str, List[str]] = {} - - if isinstance(data, BaseModel): - data_fields: Dict[str, FieldInfo] = data.__class__.model_fields - data_field_types = get_type_hints(data.__class__) - for name in data_fields: - field = data_fields[name] - - val = getattr(data, name) - if not _is_set(val): - continue - - metadata = find_field_metadata(field, FormMetadata) - if metadata is None: - continue - - f_name = field.alias if field.alias is not None else name - - if metadata.json: - form[f_name] = [marshal_json(val, data_field_types[name])] - else: - if metadata.style == "form": - _populate_form( - f_name, - metadata.explode, - val, - ",", - form, - ) - else: - raise ValueError(f"Invalid form style for field {name}") - elif isinstance(data, Dict): - for key, value in data.items(): - if _is_set(value): - form[key] = [_val_to_string(value)] - else: - raise TypeError(f"Invalid request body type {type(data)} for form data") - - return form diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py deleted file mode 100644 index 37a6e7f9..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py +++ /dev/null @@ -1,205 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, -) - -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - QueryParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) -from .forms import _populate_form - - -def get_query_params( - query_params: Any, - gbls: Optional[Any] = None, -) -> Dict[str, List[str]]: - params: Dict[str, List[str]] = {} - - globals_already_populated = _populate_query_params(query_params, gbls, params, []) - if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) - - return params - - -def _populate_query_params( - query_params: Any, - gbls: Any, - query_param_values: Dict[str, List[str]], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(query_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields - param_field_types = get_type_hints(query_params.__class__) - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - - metadata = find_field_metadata(field, QueryParamMetadata) - if not metadata: - continue - - value = getattr(query_params, name) if _is_set(query_params) else None - - value, global_found = _populate_from_globals( - name, value, QueryParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - f_name = field.alias if field.alias is not None else name - serialization = metadata.serialization - if serialization is not None: - serialized_parms = _get_serialized_params( - metadata, f_name, value, param_field_types[name] - ) - for key, value in serialized_parms.items(): - if key in query_param_values: - query_param_values[key].extend(value) - else: - query_param_values[key] = [value] - else: - style = metadata.style - if 
style == "deepObject": - _populate_deep_object_query_params(f_name, value, query_param_values) - elif style == "form": - _populate_delimited_query_params( - metadata, f_name, value, ",", query_param_values - ) - elif style == "pipeDelimited": - _populate_delimited_query_params( - metadata, f_name, value, "|", query_param_values - ) - else: - raise NotImplementedError( - f"query param style {style} not yet supported" - ) - - return globals_already_populated - - -def _populate_deep_object_query_params( - field_name: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj): - return - - if isinstance(obj, BaseModel): - _populate_deep_object_query_params_basemodel(field_name, obj, params) - elif isinstance(obj, Dict): - _populate_deep_object_query_params_dict(field_name, obj, params) - - -def _populate_deep_object_query_params_basemodel( - prior_params_key: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj) or not isinstance(obj, BaseModel): - return - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - - f_name = obj_field.alias if obj_field.alias is not None else name - - params_key = f"{prior_params_key}[{f_name}]" - - obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if not _is_set(obj_param_metadata): - continue - - obj_val = getattr(obj, name) - if not _is_set(obj_val): - continue - - if isinstance(obj_val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, obj_val, params) - elif isinstance(obj_val, Dict): - _populate_deep_object_query_params_dict(params_key, obj_val, params) - elif isinstance(obj_val, List): - _populate_deep_object_query_params_list(params_key, obj_val, params) - else: - params[params_key] = [_val_to_string(obj_val)] - - -def _populate_deep_object_query_params_dict( - prior_params_key: str, - value: Dict, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for key, val 
in value.items(): - if not _is_set(val): - continue - - params_key = f"{prior_params_key}[{key}]" - - if isinstance(val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, val, params) - elif isinstance(val, Dict): - _populate_deep_object_query_params_dict(params_key, val, params) - elif isinstance(val, List): - _populate_deep_object_query_params_list(params_key, val, params) - else: - params[params_key] = [_val_to_string(val)] - - -def _populate_deep_object_query_params_list( - params_key: str, - value: List, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for val in value: - if not _is_set(val): - continue - - if params.get(params_key) is None: - params[params_key] = [] - - params[params_key].append(_val_to_string(val)) - - -def _populate_delimited_query_params( - metadata: QueryParamMetadata, - field_name: str, - obj: Any, - delimiter: str, - query_param_values: Dict[str, List[str]], -): - _populate_form( - field_name, - metadata.explode, - obj, - delimiter, - query_param_values, - ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py deleted file mode 100644 index d5240dd5..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import io -from dataclasses import dataclass -import re -from typing import ( - Any, - Optional, -) - -from .forms import serialize_form_data, serialize_multipart_form - -from .serializers import marshal_json - -SERIALIZATION_METHOD_TO_CONTENT_TYPE = { - "json": "application/json", - "form": "application/x-www-form-urlencoded", - "multipart": "multipart/form-data", - "raw": "application/octet-stream", - "string": "text/plain", -} - - -@dataclass -class SerializedRequestBody: - media_type: Optional[str] = None - content: Optional[Any] = None - data: Optional[Any] = None - files: Optional[Any] = None - - -def serialize_request_body( - request_body: Any, - nullable: bool, - optional: bool, - serialization_method: str, - request_body_type, -) -> Optional[SerializedRequestBody]: - if request_body is None: - if not nullable and optional: - return None - - media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] - - serialized_request_body = SerializedRequestBody(media_type) - - if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: - serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"multipart\/.*", media_type) is not None: - ( - serialized_request_body.media_type, - serialized_request_body.data, - serialized_request_body.files, - ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: - serialized_request_body.data = serialize_form_data(request_body) - elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): - serialized_request_body.content = request_body - elif isinstance(request_body, str): - serialized_request_body.content = request_body - else: - raise TypeError( - f"invalid request body type {type(request_body)} for mediaType {media_type}" - ) - - return serialized_request_body diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py deleted file mode 100644 index 4d608671..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py +++ /dev/null @@ -1,217 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import asyncio -import random -import time -from typing import List - -import httpx - - -class BackoffStrategy: - initial_interval: int - max_interval: int - exponent: float - max_elapsed_time: int - - def __init__( - self, - initial_interval: int, - max_interval: int, - exponent: float, - max_elapsed_time: int, - ): - self.initial_interval = initial_interval - self.max_interval = max_interval - self.exponent = exponent - self.max_elapsed_time = max_elapsed_time - - -class RetryConfig: - strategy: str - backoff: BackoffStrategy - retry_connection_errors: bool - - def __init__( - self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool - ): - self.strategy = strategy - self.backoff = backoff - self.retry_connection_errors = retry_connection_errors - - -class Retries: - config: RetryConfig - status_codes: List[str] - - def __init__(self, config: RetryConfig, status_codes: List[str]): - self.config = config - self.status_codes = status_codes - - -class TemporaryError(Exception): - response: httpx.Response - - def __init__(self, response: httpx.Response): - self.response = response - - -class PermanentError(Exception): - inner: Exception - - def __init__(self, inner: Exception): - self.inner = inner - - -def retry(func, retries: Retries): - if retries.config.strategy == "backoff": - - def do_request() -> httpx.Response: - res: httpx.Response - try: - res = func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == 
parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return retry_with_backoff( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return func() - - -async def retry_async(func, retries: Retries): - if retries.config.strategy == "backoff": - - async def do_request() -> httpx.Response: - res: httpx.Response - try: - res = await func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return await retry_with_backoff_async( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return await func() - - -def retry_with_backoff( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, 
-): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) - retries += 1 - - -async def retry_with_backoff_async( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return await func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) - await asyncio.sleep(sleep) - retries += 1 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py deleted file mode 100644 index 295a3f40..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py +++ /dev/null @@ -1,174 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import base64 -from typing import ( - Any, - Dict, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - SecurityMetadata, - find_field_metadata, -) - - -def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: - headers: Dict[str, str] = {} - query_params: Dict[str, List[str]] = {} - - if security is None: - return headers, query_params - - if not isinstance(security, BaseModel): - raise TypeError("security must be a pydantic model") - - sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields - for name in sec_fields: - sec_field = sec_fields[name] - - value = getattr(security, name) - if value is None: - continue - - metadata = find_field_metadata(sec_field, SecurityMetadata) - if metadata is None: - continue - if metadata.option: - _parse_security_option(headers, query_params, value) - return headers, query_params - if metadata.scheme: - # Special case for basic auth or custom auth which could be a flattened model - if metadata.sub_type in ["basic", "custom"] and not isinstance( - value, BaseModel - ): - _parse_security_scheme(headers, query_params, metadata, name, security) - else: - _parse_security_scheme(headers, query_params, metadata, name, value) - - return headers, query_params - - -def _parse_security_option( - headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any -): - if not isinstance(option, BaseModel): - raise TypeError("security option must be a pydantic model") - - opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields - for name in opt_fields: - opt_field = opt_fields[name] - - metadata = find_field_metadata(opt_field, SecurityMetadata) - if metadata is None or not metadata.scheme: - continue - _parse_security_scheme( - headers, query_params, metadata, name, getattr(option, name) - ) - - -def _parse_security_scheme( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: 
SecurityMetadata, - field_name: str, - scheme: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - if isinstance(scheme, BaseModel): - if scheme_type == "http": - if sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return - if sub_type == "custom": - return - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - value = getattr(scheme, name) - - _parse_security_scheme_value( - headers, query_params, scheme_metadata, metadata, name, value - ) - else: - _parse_security_scheme_value( - headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme - ) - - -def _parse_security_scheme_value( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - security_metadata: SecurityMetadata, - field_name: str, - value: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - header_name = security_metadata.get_field_name(field_name) - - if scheme_type == "apiKey": - if sub_type == "header": - headers[header_name] = value - elif sub_type == "query": - query_params[header_name] = [value] - else: - raise ValueError("sub type {sub_type} not supported") - elif scheme_type == "openIdConnect": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "oauth2": - if sub_type != "client_credentials": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "http": - if sub_type == "bearer": - headers[header_name] = _apply_bearer(value) - elif sub_type == "custom": - return - else: - raise ValueError("sub type {sub_type} not supported") - else: - raise ValueError("scheme type {scheme_type} not supported") - - -def _apply_bearer(token: str) -> str: - return token.lower().startswith("bearer ") and 
token or f"Bearer {token}" - - -def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): - username = "" - password = "" - - if not isinstance(scheme, BaseModel): - raise TypeError("basic auth scheme must be a pydantic model") - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - field_name = metadata.field_name - value = getattr(scheme, name) - - if field_name == "username": - username = value - if field_name == "password": - password = value - - data = f"{username}:{password}".encode() - headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py deleted file mode 100644 index baa41fbd..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ /dev/null @@ -1,219 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from decimal import Decimal -import json -from typing import Any, Dict, List, Union, get_args -import httpx -from typing_extensions import get_origin -from pydantic import ConfigDict, create_model -from pydantic_core import from_json -from typing_inspection.typing_objects import is_union - -from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset - - -def serialize_decimal(as_str: bool): - def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: - return None - if isinstance(d, Unset): - return d - - if not isinstance(d, Decimal): - raise ValueError("Expected Decimal object") - - return str(d) if as_str else float(d) - - return serialize - - -def validate_decimal(d): - if d is None: - return None - - if isinstance(d, (Decimal, Unset)): - return d - - if not isinstance(d, (str, int, float)): - raise ValueError("Expected string, int or float") - - return Decimal(str(d)) - - -def serialize_float(as_str: bool): - def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: - return None - if isinstance(f, Unset): - return f - - if not isinstance(f, float): - raise ValueError("Expected float") - - return str(f) if as_str else f - - return serialize - - -def validate_float(f): - if f is None: - return None - - if isinstance(f, (float, Unset)): - return f - - if not isinstance(f, str): - raise ValueError("Expected string") - - return float(f) - - -def serialize_int(as_str: bool): - def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: - return None - if isinstance(i, Unset): - return i - - if not isinstance(i, int): - raise ValueError("Expected int") - - return str(i) if as_str else i - - return serialize - - -def validate_int(b): - if b is None: - return None - - if isinstance(b, (int, Unset)): - return b - - if not 
isinstance(b, str): - raise ValueError("Expected string") - - return int(b) - - -def validate_open_enum(is_int: bool): - def validate(e): - if e is None: - return None - - if isinstance(e, Unset): - return e - - if is_int: - if not isinstance(e, int): - raise ValueError("Expected int") - else: - if not isinstance(e, str): - raise ValueError("Expected string") - - return e - - return validate - - -def validate_const(v): - def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: - return None - - if v != c: - raise ValueError(f"Expected {v}") - - return c - - return validate - - -def unmarshal_json(raw, typ: Any) -> Any: - return unmarshal(from_json(raw), typ) - - -def unmarshal(val, typ: Any) -> Any: - unmarshaller = create_model( - "Unmarshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = unmarshaller(body=val) - - # pyright: ignore[reportAttributeAccessIssue] - return m.body # type: ignore - - -def marshal_json(val, typ): - if is_nullable(typ) and val is None: - return "null" - - marshaller = create_model( - "Marshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = marshaller(body=val) - - d = m.model_dump(by_alias=True, mode="json", exclude_none=True) - - if len(d) == 0: - return "" - - return json.dumps(d[next(iter(d))], separators=(",", ":")) - - -def is_nullable(field): - origin = get_origin(field) - if origin is Nullable or origin is OptionalNullable: - return True - - if not origin is Union or type(None) not in get_args(field): - return False - - for arg in get_args(field): - if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: - return True - - return False - - -def stream_to_text(stream: httpx.Response) -> str: - return "".join(stream.iter_text()) - - -async def stream_to_text_async(stream: httpx.Response) -> str: - return "".join([chunk 
async for chunk in stream.aiter_text()]) - - -def stream_to_bytes(stream: httpx.Response) -> bytes: - return stream.content - - -async def stream_to_bytes_async(stream: httpx.Response) -> bytes: - return await stream.aread() - - -def get_pydantic_model(data: Any, typ: Any) -> Any: - if not _contains_pydantic_model(data): - return unmarshal(data, typ) - - return data - - -def _contains_pydantic_model(data: Any) -> bool: - if isinstance(data, BaseModel): - return True - if isinstance(data, List): - return any(_contains_pydantic_model(item) for item in data) - if isinstance(data, Dict): - return any(_contains_pydantic_model(value) for value in data.values()) - - return False diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 9ffdc439..00000000 --- a/poetry.lock +++ /dev/null @@ -1,1423 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. - -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[[package]] -name = "anyio" -version = "4.5.2" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, - {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" 
-typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] -trio = ["trio (>=0.26.1)"] - -[[package]] -name = "astroid" -version = "3.2.4" -description = "An abstract syntax tree for Python with inference support." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "authlib" -version = "1.6.0" -description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
-optional = true -python-versions = ">=3.9" -groups = ["main"] -markers = "extra == \"agents\"" -files = [ - {file = "authlib-1.6.0-py2.py3-none-any.whl", hash = "sha256:91685589498f79e8655e8a8947431ad6288831d643f11c55c2143ffcc738048d"}, - {file = "authlib-1.6.0.tar.gz", hash = "sha256:4367d32031b7af175ad3a323d571dc7257b7099d55978087ceae4a0d88cd3210"}, -] - -[package.dependencies] -cryptography = "*" - -[[package]] -name = "cachetools" -version = "5.5.0" -description = "Extensible memoizing collections and decorators" -optional = true -python-versions = ">=3.7" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, - {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, -] - -[[package]] -name = "certifi" -version = "2024.8.30" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, -] - -[[package]] -name = "cffi" -version = "1.17.1" -description = "Foreign Function Interface for Python calling C code." 
-optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, - {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, - {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, - {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", 
hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, - {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, - {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, - {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, - {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, - 
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, - {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, - {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, - {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, - {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, - {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, - {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, - {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, - {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, -] -markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "charset-normalizer" -version = "3.4.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = true -python-versions = ">=3.7.0" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = 
"charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = 
"charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = 
"charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = 
"charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, -] - -[[package]] -name = "click" -version = "8.2.1" -description = "Composable command line interface toolkit" -optional = true -python-versions = ">=3.10" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, - {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] -markers = {main = "extra == \"agents\"", dev = "sys_platform == \"win32\""} - -[[package]] -name = "cryptography" -version = "43.0.3" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -files = [ - {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, - {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, - {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, - {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, - {file = 
"cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, - {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, - {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, - {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, - {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, - {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, - 
{file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, - {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, -] -markers = {main = "python_version < \"3.11\" and extra == \"agents\"", dev = "python_version < \"3.11\""} - -[package.dependencies] -cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] -nox = ["nox"] -pep8test = ["check-sdist", "click", "mypy", "ruff"] -sdist = ["build"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] -test-randomorder = ["pytest-randomly"] - -[[package]] -name = "cryptography" -version = "45.0.3" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-optional = false -python-versions = "!=3.9.0,!=3.9.1,>=3.7" -groups = ["main", "dev"] -files = [ - {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"}, - {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"}, - {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"}, - {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"}, - {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"}, - {file = 
"cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"}, - {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"}, - {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"}, - {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"}, - {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, - {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, -] -markers = {main = "python_version >= \"3.11\" 
and extra == \"agents\"", dev = "python_version >= \"3.11\""} - -[package.dependencies] -cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] -docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] -nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] -pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] -sdist = ["build (>=1.0.0)"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] -test-randomorder = ["pytest-randomly"] - -[[package]] -name = "dill" -version = "0.3.9" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, - {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "eval-type-backport" -version = "0.2.0" -description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, - {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "google-auth" -version = "2.27.0" -description = "Google Authentication Library" -optional = true -python-versions = ">=3.7" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, - {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = ">=3.1.4,<5" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0.dev0)"] - -[[package]] -name = "griffe" -version = "1.7.3" -description = "Signatures for entire Python programs. 
Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." -optional = true -python-versions = ">=3.9" -groups = ["main"] -markers = "extra == \"agents\"" -files = [ - {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"}, - {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"}, -] - -[package.dependencies] -colorama = ">=0.4" - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "1.0.7" -description = "A minimal low-level HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, - {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<1.0)"] - -[[package]] -name = "httpx" -version = "0.28.1" -description = "The next generation HTTP client." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" - -[package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "httpx-sse" -version = "0.4.0" -description = "Consume Server-Sent Event (SSE) messages with HTTPX." -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, - {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, -] - -[[package]] -name = "idna" -version = "3.10" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, -] - -[package.extras] -all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = 
"sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mcp" -version = "1.9.1" -description = "Model Context Protocol SDK" -optional = true -python-versions = ">=3.10" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "mcp-1.9.1-py3-none-any.whl", hash = "sha256:2900ded8ffafc3c8a7bfcfe8bc5204037e988e753ec398f371663e6a06ecd9a9"}, - {file = "mcp-1.9.1.tar.gz", hash = "sha256:19879cd6dde3d763297617242888c2f695a95dfa854386a6a68676a646ce75e4"}, -] - -[package.dependencies] -anyio = ">=4.5" -httpx = ">=0.27" -httpx-sse = ">=0.4" -pydantic = ">=2.7.2,<3.0.0" -pydantic-settings = ">=2.5.2" -python-multipart = ">=0.0.9" -sse-starlette = ">=1.6.1" -starlette = ">=0.27" -uvicorn = {version = ">=0.23.1", markers = "sys_platform != \"emscripten\""} - -[package.extras] -cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"] -rich = ["rich (>=13.9.4)"] -ws = 
["websockets (>=15.0.1)"] - -[[package]] -name = "mypy" -version = "1.15.0" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.9" -groups = ["dev", "lint"] -files = [ - {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, - {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, - {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, - {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, - {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, - {file = 
"mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, - {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, - {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, - {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, - {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, - {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, - {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, - {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, - {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, -] - -[package.dependencies] -mypy_extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -faster-cache = ["orjson"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." 
-optional = false -python-versions = ">=3.5" -groups = ["dev", "lint"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["lint"] -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - -[[package]] -name = "packaging" -version = "24.2" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, - {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, -] - -[[package]] -name = "platformdirs" -version = "4.3.6" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, -] - -[package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pyasn1" -version = "0.6.1" -description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, - {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.1" -description = "A collection of ASN.1-based protocols modules" -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, - {file = 
"pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, -] - -[package.dependencies] -pyasn1 = ">=0.4.6,<0.7.0" - -[[package]] -name = "pycparser" -version = "2.22" -description = "C parser in Python" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, -] -markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} - -[[package]] -name = "pydantic" -version = "2.10.6" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, -] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" -typing-extensions = ">=4.12.2" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] - -[[package]] -name = "pydantic-core" -version = "2.27.2" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = 
"pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = 
"pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", 
hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = 
"sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = 
"sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - 
{file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - 
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - 
-[[package]] -name = "pydantic-settings" -version = "2.9.1" -description = "Settings management using Pydantic" -optional = true -python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, - {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, -] - -[package.dependencies] -pydantic = ">=2.7.0" -python-dotenv = ">=0.21.0" -typing-inspection = ">=0.4.0" - -[package.extras] -aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] -azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] -gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] -toml = ["tomli (>=2.0.1)"] -yaml = ["pyyaml (>=6.0.1)"] - -[[package]] -name = "pylint" -version = "3.2.3" -description = "python code static checker" -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, -] - -[package.dependencies] -astroid = ">=3.2.2,<=3.3.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, -] -isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant 
(>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pyright" -version = "1.1.401" -description = "Command line wrapper for pyright" -optional = false -python-versions = ">=3.7" -groups = ["lint"] -files = [ - {file = "pyright-1.1.401-py3-none-any.whl", hash = "sha256:6fde30492ba5b0d7667c16ecaf6c699fab8d7a1263f6a18549e0b00bf7724c06"}, - {file = "pyright-1.1.401.tar.gz", hash = "sha256:788a82b6611fa5e34a326a921d86d898768cddf59edde8e93e56087d277cc6f1"}, -] - -[package.dependencies] -nodeenv = ">=1.6.0" -typing-extensions = ">=4.1" - -[package.extras] -all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] -dev = ["twine (>=3.4.1)"] -nodejs = ["nodejs-wheel-binaries"] - -[[package]] -name = "pytest" -version = "8.3.4" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, - {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.23.8" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = "pytest_asyncio-0.23.8.tar.gz", hash = 
"sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, -] - -[package.dependencies] -pytest = ">=7.0.0,<9" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-dotenv" -version = "1.1.0" -description = "Read key-value pairs from a .env file and set them as environment variables" -optional = true -python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, - {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, -] - -[package.extras] -cli = ["click (>=5.0)"] - -[[package]] -name = "python-multipart" -version = "0.0.20" -description = "A streaming multipart parser for Python" -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, - {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, -] - -[[package]] -name = "requests" -version = "2.32.3" -description = 
"Python HTTP for Humans." -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rsa" -version = "4.2" -description = "Pure-Python RSA implementation" -optional = true -python-versions = "*" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "ruff" -version = "0.11.11" -description = "An extremely fast Python linter and code formatter, written in Rust." 
-optional = false -python-versions = ">=3.7" -groups = ["lint"] -files = [ - {file = "ruff-0.11.11-py3-none-linux_armv6l.whl", hash = "sha256:9924e5ae54125ed8958a4f7de320dab7380f6e9fa3195e3dc3b137c6842a0092"}, - {file = "ruff-0.11.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8a93276393d91e952f790148eb226658dd275cddfde96c6ca304873f11d2ae4"}, - {file = "ruff-0.11.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d6e333dbe2e6ae84cdedefa943dfd6434753ad321764fd937eef9d6b62022bcd"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7885d9a5e4c77b24e8c88aba8c80be9255fa22ab326019dac2356cff42089fc6"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b5ab797fcc09121ed82e9b12b6f27e34859e4227080a42d090881be888755d4"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e231ff3132c1119ece836487a02785f099a43992b95c2f62847d29bace3c75ac"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a97c9babe1d4081037a90289986925726b802d180cca784ac8da2bbbc335f709"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8c4ddcbe8a19f59f57fd814b8b117d4fcea9bee7c0492e6cf5fdc22cfa563c8"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6224076c344a7694c6fbbb70d4f2a7b730f6d47d2a9dc1e7f9d9bb583faf390b"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:882821fcdf7ae8db7a951df1903d9cb032bbe838852e5fc3c2b6c3ab54e39875"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:dcec2d50756463d9df075a26a85a6affbc1b0148873da3997286caf1ce03cae1"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:99c28505ecbaeb6594701a74e395b187ee083ee26478c1a795d35084d53ebd81"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:9263f9e5aa4ff1dec765e99810f1cc53f0c868c5329b69f13845f699fe74f639"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:64ac6f885e3ecb2fdbb71de2701d4e34526651f1e8503af8fb30d4915a3fe345"}, - {file = "ruff-0.11.11-py3-none-win32.whl", hash = "sha256:1adcb9a18802268aaa891ffb67b1c94cd70578f126637118e8099b8e4adcf112"}, - {file = "ruff-0.11.11-py3-none-win_amd64.whl", hash = "sha256:748b4bb245f11e91a04a4ff0f96e386711df0a30412b9fe0c74d5bdc0e4a531f"}, - {file = "ruff-0.11.11-py3-none-win_arm64.whl", hash = "sha256:6c51f136c0364ab1b774767aa8b86331bd8e9d414e2d107db7a2189f35ea1f7b"}, - {file = "ruff-0.11.11.tar.gz", hash = "sha256:7774173cc7c1980e6bf67569ebb7085989a78a103922fb83ef3dfe230cd0687d"}, -] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "sse-starlette" -version = "2.1.3" -description = "SSE plugin for Starlette" -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "sse_starlette-2.1.3-py3-none-any.whl", hash = 
"sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772"}, - {file = "sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169"}, -] - -[package.dependencies] -anyio = "*" -starlette = "*" -uvicorn = "*" - -[package.extras] -examples = ["fastapi"] - -[[package]] -name = "starlette" -version = "0.46.2" -description = "The little ASGI library that shines." -optional = true -python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, - {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, -] - -[package.dependencies] -anyio = ">=3.6.2,<5" - -[package.extras] -full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] - -[[package]] -name = "tomli" -version = "2.2.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.8" -groups = ["dev", "lint"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = 
"tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.2" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, - {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, -] - -[[package]] -name = "types-authlib" -version = "1.5.0.20250516" -description = "Typing stubs for Authlib" -optional = false -python-versions = ">=3.9" -groups = ["dev"] -files = [ - {file = "types_authlib-1.5.0.20250516-py3-none-any.whl", hash = "sha256:c553659ba00b7e5f98d1bc183a47224a882de5d32c07917b1587a6a22ddd2583"}, - {file = "types_authlib-1.5.0.20250516.tar.gz", hash = "sha256:6d11b46622c4c338087d059e9036887408c788cf254f0fb11ff69f2a85ca7231"}, -] - -[package.dependencies] -cryptography = "*" - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20241003" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, - {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev", "lint"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = 
"sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -] - -[package.dependencies] -typing-extensions = ">=4.12.0" - -[[package]] -name = "urllib3" -version = "2.2.3" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "uvicorn" -version = "0.34.2" -description = "The lightning-fast ASGI server." 
-optional = true -python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" -files = [ - {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, - {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, -] - -[package.dependencies] -click = ">=7.0" -h11 = ">=0.8" -typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} - -[package.extras] -standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[extras] -agents = ["authlib", "griffe", "mcp"] -gcp = ["google-auth", "requests"] - -[metadata] -lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "f111068ee90dcada908f5064a1ed67f027a728ababa2bb6bd9e6957957fc5c6c" diff --git a/poetry.toml b/poetry.toml deleted file mode 100644 index ab1033bd..00000000 --- a/poetry.toml +++ /dev/null @@ -1,2 +0,0 @@ -[virtualenvs] -in-project = true diff --git a/pylintrc b/pylintrc index 29202a96..d1653ae1 100644 --- a/pylintrc +++ b/pylintrc @@ -90,7 +90,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no @@ -103,7 +103,7 @@ source-roots=src # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. -suggestion-mode=yes +# Note: suggestion-mode was removed in pylint 3.0 # Allow loading of arbitrary C extensions. 
Extensions are imported into the # active Python interpreter and may run arbitrary code. @@ -641,7 +641,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. diff --git a/pyproject.toml b/pyproject.toml index 123c0fe9..c42e4260 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,68 +1,112 @@ [project] name = "mistralai" -version = "1.9.3" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API." -authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" -requires-python = ">=3.9" +authors = [{ name = "Mistral" }] +requires-python = ">=3.10" +readme = "README.md" dependencies = [ "eval-type-backport >=0.2.0", "httpx >=0.28.1", - "pydantic >=2.10.3", + "pydantic >=2.11.2", "python-dateutil >=2.8.2", "typing-inspection >=0.4.0", + "pyyaml (>=6.0.2,<7.0.0)", + "invoke (>=2.2.0,<3.0.0)", + "opentelemetry-sdk (>=1.33.1,<2.0.0)", + "opentelemetry-api (>=1.33.1,<2.0.0)", + "opentelemetry-exporter-otlp-proto-http (>=1.37.0,<2.0.0)", + "opentelemetry-semantic-conventions (>=0.59b0,<0.61)", ] -[tool.poetry] -repository = "https://github.com/mistralai/client-python.git" -packages = [ - { include = "mistralai", from = "src" }, - { include = "mistralai_azure", from = "packages/mistralai_azure/src" }, - { include = "mistralai_gcp", from = "packages/mistralai_gcp/src" }, +[project.optional-dependencies] +gcp = [ + "google-auth >=2.27.0", + "requests >=2.32.3", +] +agents = [ + "mcp >=1.0,<2.0", + "griffe >=1.7.3,<2.0", + "authlib >=1.5.2,<2.0", +] +realtime = [ + "websockets >=13.0", ] -include = ["py.typed", "src/mistralai/py.typed"] -[tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai/py.typed"] +[project.urls] +Repository = 
"https://github.com/mistralai/client-python.git" -[virtualenvs] -in-project = true +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", + "types-python-dateutil>=2.9.0.20240316,<3", + "types-authlib>=1.5.0.20250516,<2", + "types-pyyaml>=6.0.12.20250516,<7", + "mcp>=1.0,<2", + "griffe>=1.7.3,<2", + "authlib>=1.5.2,<2", + "websockets >=13.0", +] +lint = [ + "ruff>=0.11.10,<0.12", + "pyright>=1.1.401,<2", + "mypy==1.15.0", +] -[tool.poetry.group.dev.dependencies] -mypy = "==1.15.0" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" -types-authlib = "^1.5.0.20250516" +[tool.uv] +default-groups = [ + "dev", + "lint", +] -[tool.poetry.group.lint.dependencies] -ruff = "^0.11.10" -pyright = "^1.1.401" -mypy = "==1.15.0" +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/client/py.typed"] +[tool.hatch.build] +dev-mode-dirs = [ + "src", + "packages/azure/src", + "packages/gcp/src", +] -[project.optional-dependencies] -gcp = [ - "google-auth >=2.27.0", - "requests >=2.32.3" +[tool.hatch.build.targets.sdist] +include = [ + "src/mistralai", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", ] -agents = [ - "mcp >=1.0,<2.0; python_version >= '3.10'", - "griffe >=1.7.3,<2.0", - "authlib >=1.5.2,<2.0", + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/client/py.typed" = "src/mistralai/client/py.typed" + +[tool.hatch.build.targets.wheel] +include = [ + "src/mistralai", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", ] +[tool.hatch.build.targets.wheel.sources] +"src/mistralai" = "mistralai" +"packages/azure/src/mistralai/azure" = "mistralai/azure" +"packages/gcp/src/mistralai/gcp" = "mistralai/gcp" + [build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend 
= "hatchling.build" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" -pythonpath = ["src"] +pythonpath = ["src", "packages/azure/src", "packages/gcp/src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src:packages/azure/src:packages/gcp/src" [[tool.mypy.overrides]] module = "typing_inspect" @@ -73,8 +117,10 @@ module = [ "jsonpath.*", "typing_inspect.*", "authlib.*", + "websockets.*", "mcp.*", - "griffe.*" + "griffe.*", + "google.*" ] ignore_missing_imports = true diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 163bb3a6..4baa3d88 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -1,38 +1,91 @@ #!/usr/bin/env bash -set -e - ERRORS=0 +echo "Checking PEP 420 namespace integrity..." +if [ -f src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (core)" +fi +if [ -f packages/azure/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/azure/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (azure)" +fi +if [ -f packages/gcp/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/gcp/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (gcp)" +fi + echo "Running mypy..." 
-# TODO: Uncomment once the examples are fixed -# poetry run mypy examples/ || ERRORS=1 +echo "-> running on examples" +uv run mypy examples/ \ + --exclude 'audio/' || ERRORS=1 echo "-> running on extra" -poetry run mypy src/mistralai/extra/ || ERRORS=1 +uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -poetry run mypy src/mistralai/_hooks/ \ ---exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +uv run mypy src/mistralai/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run mypy packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run mypy packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run mypy packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run mypy packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 +echo "-> running on scripts" +uv run mypy scripts/ || ERRORS=1 echo "Running pyright..." 
# TODO: Uncomment once the examples are fixed -# poetry run pyright examples/ || ERRORS=1 +# uv run pyright examples/ || ERRORS=1 echo "-> running on extra" -poetry run pyright src/mistralai/extra/ || ERRORS=1 +uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -poetry run pyright src/mistralai/_hooks/ || ERRORS=1 +uv run pyright src/mistralai/client/_hooks/ || ERRORS=1 +echo "-> running on azure hooks" +uv run pyright packages/azure/src/mistralai/azure/client/_hooks/ || ERRORS=1 +echo "-> running on azure sdk" +uv run pyright packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run pyright packages/gcp/src/mistralai/gcp/client/_hooks/ || ERRORS=1 +echo "-> running on gcp sdk" +uv run pyright packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 +echo "-> running on scripts" +uv run pyright scripts/ || ERRORS=1 echo "Running ruff..." echo "-> running on examples" -poetry run ruff check examples/ || ERRORS=1 +uv run ruff check examples/ || ERRORS=1 echo "-> running on extra" -poetry run ruff check src/mistralai/extra/ || ERRORS=1 +uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -poetry run ruff check src/mistralai/_hooks/ \ ---exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +uv run ruff check src/mistralai/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run ruff check packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run ruff check packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run ruff check packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run ruff check 
packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 +echo "-> running on scripts" +uv run ruff check scripts/ || ERRORS=1 if [ "$ERRORS" -ne 0 ]; then -echo "❌ One or more linters failed" -exit 1 + echo "❌ One or more linters failed" + exit 1 else -echo "✅ All linters passed" + echo "✅ All linters passed" fi diff --git a/scripts/prepare_readme.py b/scripts/prepare_readme.py index 16f6fc7e..c220a055 100644 --- a/scripts/prepare_readme.py +++ b/scripts/prepare_readme.py @@ -1,33 +1,107 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - +import argparse import re -import shutil - -try: - with open("README.md", "r", encoding="utf-8") as rh: - readme_contents = rh.read() - GITHUB_URL = "https://github.com/mistralai/client-python.git" - GITHUB_URL = ( - GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL +import subprocess +import sys +from pathlib import Path + +DEFAULT_REPO_URL = "https://github.com/mistralai/client-python.git" +DEFAULT_BRANCH = "main" +LINK_PATTERN = re.compile(r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))") + + +def build_base_url(repo_url: str, branch: str, repo_subdir: str) -> str: + """Build the GitHub base URL used to rewrite relative README links.""" + normalized_repo_url = repo_url[:-4] if repo_url.endswith(".git") else repo_url + normalized_subdir = repo_subdir.strip("/") + if normalized_subdir: + normalized_subdir = f"{normalized_subdir}/" + return f"{normalized_repo_url}/blob/{branch}/{normalized_subdir}" + + +def rewrite_relative_links(contents: str, base_url: str) -> str: + """Rewrite Markdown relative links to absolute GitHub URLs.""" + return LINK_PATTERN.sub( + lambda match: f"{match.group(1)}{base_url}{match.group(2)}{match.group(3)}", + contents, + ) + + +def run_with_rewritten_readme( + readme_path: Path, base_url: str, command: list[str] +) -> int: + 
"""Rewrite README links, run a command, and restore the original README.""" + original_contents = readme_path.read_text(encoding="utf-8") + rewritten_contents = rewrite_relative_links(original_contents, base_url) + readme_path.write_text(rewritten_contents, encoding="utf-8") + try: + if not command: + return 0 + result = subprocess.run(command, check=False) + return result.returncode + finally: + readme_path.write_text(original_contents, encoding="utf-8") + + +def parse_args(argv: list[str]) -> argparse.Namespace: + """Parse command-line arguments for README rewriting.""" + parser = argparse.ArgumentParser( + description=( + "Rewrite README links to absolute GitHub URLs while running a command." ) - # links on PyPI should have absolute URLs - readme_contents = re.sub( - r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", - lambda m: m.group(1) - + GITHUB_URL - + "/blob/master/" - + m.group(2) - + m.group(3), - readme_contents, + ) + parser.add_argument( + "--readme", + type=Path, + default=Path("README.md"), + help="Path to the README file to rewrite.", + ) + parser.add_argument( + "--repo-url", + default=DEFAULT_REPO_URL, + help="Repository URL used to build absolute links.", + ) + parser.add_argument( + "--branch", + default=DEFAULT_BRANCH, + help="Repository branch used for absolute links.", + ) + parser.add_argument( + "--repo-subdir", + default="", + help="Repository subdirectory that contains the README.", + ) + parser.add_argument( + "command", + nargs=argparse.REMAINDER, + help=( + "Command to run (prefix with -- to stop option parsing). " + "If omitted, the rewritten README is printed to stdout." 
+ ), + ) + return parser.parse_args(argv) + + +def main(argv: list[str]) -> int: + """Entry point for rewriting README links during build commands.""" + args = parse_args(argv) + readme_path = args.readme + if not readme_path.is_file(): + raise FileNotFoundError(f"README file not found: {readme_path}") + base_url = build_base_url(args.repo_url, args.branch, args.repo_subdir) + command = ( + args.command[1:] + if args.command and args.command[0] == "--" + else args.command + ) + if not command: + rewritten_contents = rewrite_relative_links( + readme_path.read_text(encoding="utf-8"), + base_url, ) + sys.stdout.write(rewritten_contents) + return 0 + return run_with_rewritten_readme(readme_path, base_url, command) - with open("README-PYPI.md", "w", encoding="utf-8") as wh: - wh.write(readme_contents) -except Exception as e: - try: - print("Failed to rewrite README.md to README-PYPI.md, copying original instead") - print(e) - shutil.copyfile("README.md", "README-PYPI.md") - except Exception as ie: - print("Failed to copy README.md to README-PYPI.md") - print(ie) + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/scripts/publish.sh b/scripts/publish.sh index f2f2cf2c..c35748f3 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash -export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +uv run python scripts/prepare_readme.py -poetry run python scripts/prepare_readme.py - -poetry publish --build --skip-existing +uv build +uv publish --token $PYPI_TOKEN diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 6b281092..eca854b4 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -1,42 +1,124 @@ #!/bin/bash -# List of files to exclude +# Defaults +RETRY_COUNT=3 +NO_EXTRA_DEP=false + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --no-extra-dep) + NO_EXTRA_DEP=true + shift + ;; + --retry-count) + RETRY_COUNT="$2" + shift 2 + ;; + --help) + echo "Usage: $0 
[--no-extra-dep] [--retry-count N]" + echo " --no-extra-dep: Exclude files that require extra dependencies" + echo " --retry-count N: Number of retries for each test (default: 3)" + echo " --help: Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +# List of files to always exclude exclude_files=( "examples/mistral/chat/chatbot_with_streaming.py" "examples/mistral/agents/async_conversation_run_mcp_remote_auth.py" - "examples/mistral/jobs/async_jobs_chat.py" + "examples/mistral/jobs/async_fine_tuning.py" + "examples/mistral/jobs/async_fine_tuning_chat.py" + "examples/mistral/jobs/fine_tuning.py" + "examples/mistral/jobs/fine_tuning_dry_run.py" + "examples/mistral/jobs/async_jobs_ocr_batch_annotation.py" "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" + "examples/mistral/agents/async_conversation_run_code_interpreter.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" + "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/audio/async_realtime_transcription_microphone.py" + "examples/mistral/audio/async_realtime_transcription_stream.py" + "examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py" ) -# Check if the first argument is "no-extra-dep" then remove all the files that require the extra dependencies -if [ "$1" = "--no-extra-dep" ]; then - # Add more files to the exclude list - exclude_files+=( - "examples/mistral/agents/async_conversation_run_mcp_remote.py" - "examples/mistral/agents/async_conversation_run_stream.py" - "examples/mistral/agents/async_conversation_run.py" - ) +# Files that require extra dependencies (agents, mcp, audio, etc.) 
+extra_dep_files=( + "examples/mistral/agents/" + "examples/mistral/mcp_servers/" + "examples/mistral/audio/" +) + +if [ "$NO_EXTRA_DEP" = true ]; then + for pattern in "${extra_dep_files[@]}"; do + for f in ${pattern}*.py; do + [ -f "$f" ] && exclude_files+=("$f") + done + done fi failed=0 +echo "Skipping scripts" +for file in "${exclude_files[@]}"; do + echo "$file" +done + +# Function to run a test with retries +run_test_with_retries() { + local file="$1" + local attempt=1 + local error_outputs=() + + while [ $attempt -le $RETRY_COUNT ]; do + echo "Running $file (attempt $attempt/$RETRY_COUNT)" + + # Run the script and capture both exit status and error output + current_output=$(python3 "$file" 2>&1) + exit_code=$? + + if [ $exit_code -eq 0 ]; then + echo "Success" + return 0 + else + # Store the error output from this attempt + error_outputs+=("Attempt $attempt: $current_output") + + if [ $attempt -lt $RETRY_COUNT ]; then + echo "Failed (attempt $attempt/$RETRY_COUNT), retrying..." + sleep 1 # Brief pause before retry + else + echo "Failed after $RETRY_COUNT attempts" + echo "Error outputs from all attempts:" + for error_output in "${error_outputs[@]}"; do + echo "$error_output" + echo "---" + done + return 1 + fi + fi + + attempt=$((attempt + 1)) + done +} + for file in examples/mistral/**/*.py; do # Check if the file is not in the exclude list if [ -f "$file" ] && [[ ! " ${exclude_files[@]} " =~ " $file " ]]; then - echo "Running $file" - # Run the script and capture the exit status - if python3 "$file" > /dev/null; then - echo "Success" - else - echo "Failed" + if ! run_test_with_retries "$file"; then failed=1 fi else - echo "Skipped $file" + echo "Skipped $file" fi done diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py deleted file mode 100644 index dd02e42e..00000000 --- a/src/mistralai/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) -from .sdk import * -from .sdkconfiguration import * -from .models import * - - -VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/src/mistralai/_hooks/__init__.py b/src/mistralai/_hooks/__init__.py deleted file mode 100644 index 2ee66cdd..00000000 --- a/src/mistralai/_hooks/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .sdkhooks import * -from .types import * -from .registration import * diff --git a/src/mistralai/_hooks/registration.py b/src/mistralai/_hooks/registration.py deleted file mode 100644 index fc3ae79b..00000000 --- a/src/mistralai/_hooks/registration.py +++ /dev/null @@ -1,17 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .deprecation_warning import DeprecationWarningHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. 
- - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) - hooks.register_after_success_hook(DeprecationWarningHook()) diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py deleted file mode 100644 index 1f9a9316..00000000 --- a/src/mistralai/_hooks/sdkhooks.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import httpx -from .types import ( - SDKInitHook, - BeforeRequestContext, - BeforeRequestHook, - AfterSuccessContext, - AfterSuccessHook, - AfterErrorContext, - AfterErrorHook, - Hooks, -) -from .registration import init_hooks -from typing import List, Optional, Tuple -from mistralai.httpclient import HttpClient - - -class SDKHooks(Hooks): - def __init__(self) -> None: - self.sdk_init_hooks: List[SDKInitHook] = [] - self.before_request_hooks: List[BeforeRequestHook] = [] - self.after_success_hooks: List[AfterSuccessHook] = [] - self.after_error_hooks: List[AfterErrorHook] = [] - init_hooks(self) - - def register_sdk_init_hook(self, hook: SDKInitHook) -> None: - self.sdk_init_hooks.append(hook) - - def register_before_request_hook(self, hook: BeforeRequestHook) -> None: - self.before_request_hooks.append(hook) - - def register_after_success_hook(self, hook: AfterSuccessHook) -> None: - self.after_success_hooks.append(hook) - - def register_after_error_hook(self, hook: AfterErrorHook) -> None: - self.after_error_hooks.append(hook) - - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - for hook in self.sdk_init_hooks: - base_url, client = hook.sdk_init(base_url, client) 
- return base_url, client - - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> httpx.Request: - for hook in self.before_request_hooks: - out = hook.before_request(hook_ctx, request) - if isinstance(out, Exception): - raise out - request = out - - return request - - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> httpx.Response: - for hook in self.after_success_hooks: - out = hook.after_success(hook_ctx, response) - if isinstance(out, Exception): - raise out - response = out - return response - - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: - for hook in self.after_error_hooks: - result = hook.after_error(hook_ctx, response, error) - if isinstance(result, Exception): - raise result - response, error = result - return response, error diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py deleted file mode 100644 index 6d0f3e11..00000000 --- a/src/mistralai/_hooks/types.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from abc import ABC, abstractmethod -import httpx -from mistralai.httpclient import HttpClient -from mistralai.sdkconfiguration import SDKConfiguration -from typing import Any, Callable, List, Optional, Tuple, Union - - -class HookContext: - config: SDKConfiguration - base_url: str - operation_id: str - oauth2_scopes: Optional[List[str]] = None - security_source: Optional[Union[Any, Callable[[], Any]]] = None - - def __init__( - self, - config: SDKConfiguration, - base_url: str, - operation_id: str, - oauth2_scopes: Optional[List[str]], - security_source: Optional[Union[Any, Callable[[], Any]]], - ): - self.config = config - self.base_url = base_url - self.operation_id = operation_id - self.oauth2_scopes = oauth2_scopes - self.security_source = security_source - - -class BeforeRequestContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterSuccessContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterErrorContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class SDKInitHook(ABC): - @abstractmethod - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - pass - - -class BeforeRequestHook(ABC): - @abstractmethod - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - pass - - -class AfterSuccessHook(ABC): - @abstractmethod - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> Union[httpx.Response, Exception]: - pass - - 
-class AfterErrorHook(ABC): - @abstractmethod - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: - pass - - -class Hooks(ABC): - @abstractmethod - def register_sdk_init_hook(self, hook: SDKInitHook): - pass - - @abstractmethod - def register_before_request_hook(self, hook: BeforeRequestHook): - pass - - @abstractmethod - def register_after_success_hook(self, hook: AfterSuccessHook): - pass - - @abstractmethod - def register_after_error_hook(self, hook: AfterErrorHook): - pass diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py deleted file mode 100644 index 5937a745..00000000 --- a/src/mistralai/_version.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import importlib.metadata - -__title__: str = "mistralai" -__version__: str = "1.9.3" -__openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.3 2.634.2 1.0.0 mistralai" - -try: - if __package__ is not None: - __version__ = importlib.metadata.version(__package__) -except importlib.metadata.PackageNotFoundError: - pass diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py deleted file mode 100644 index f9522a28..00000000 --- a/src/mistralai/async_client.py +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Optional - -from .client import MIGRATION_MESSAGE - - -class MistralAsyncClient: - def __init__( - self, - api_key: Optional[str] = None, - endpoint: str = "", - max_retries: int = 5, - timeout: int = 120, - max_concurrent_requests: int = 64, - ): - raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py deleted file mode 100644 index 66934a86..00000000 --- a/src/mistralai/audio.py +++ /dev/null @@ -1,18 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.transcriptions import Transcriptions - - -class Audio(BaseSDK): - transcriptions: Transcriptions - r"""API for audio transcription.""" - - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.transcriptions = Transcriptions(self.sdk_configuration) diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py deleted file mode 100644 index bb37a1ee..00000000 --- a/src/mistralai/basesdk.py +++ /dev/null @@ -1,354 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .sdkconfiguration import SDKConfiguration -import httpx -from mistralai import models, utils -from mistralai._hooks import ( - AfterErrorContext, - AfterSuccessContext, - BeforeRequestContext, -) -from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Mapping, Optional, Tuple -from urllib.parse import parse_qs, urlparse - - -class BaseSDK: - sdk_configuration: SDKConfiguration - - def __init__(self, sdk_config: SDKConfiguration) -> None: - self.sdk_configuration = sdk_config - - def _get_url(self, base_url, url_variables): - sdk_url, sdk_variables = self.sdk_configuration.get_server_details() - - if base_url is None: - base_url = sdk_url - - if url_variables is None: - url_variables = sdk_variables - - return utils.template_url(base_url, url_variables) - - def _build_request_async( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: 
Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.async_client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - ) - - def _build_request( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - ) - - def _build_request_with_client( - self, - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Request: - query_params = {} - 
- url = url_override - if url is None: - url = utils.generate_url( - self._get_url(base_url, url_variables), - path, - request if request_has_path_params else None, - _globals if request_has_path_params else None, - ) - - query_params = utils.get_query_params( - request if request_has_query_params else None, - _globals if request_has_query_params else None, - ) - else: - # Pick up the query parameter from the override so they can be - # preserved when building the request later on (necessary as of - # httpx 0.28). - parsed_override = urlparse(str(url_override)) - query_params = parse_qs(parsed_override.query, keep_blank_values=True) - - headers = utils.get_headers(request, _globals) - headers["Accept"] = accept_header_value - headers[user_agent_header] = self.sdk_configuration.user_agent - - if security is not None: - if callable(security): - security = security() - security = utils.get_security_from_env(security, models.Security) - if security is not None: - security_headers, security_query_params = utils.get_security(security) - headers = {**headers, **security_headers} - query_params = {**query_params, **security_query_params} - - serialized_request_body = SerializedRequestBody() - if get_serialized_body is not None: - rb = get_serialized_body() - if request_body_required and rb is None: - raise ValueError("request body is required") - - if rb is not None: - serialized_request_body = rb - - if ( - serialized_request_body.media_type is not None - and serialized_request_body.media_type - not in ( - "multipart/form-data", - "multipart/mixed", - ) - ): - headers["content-type"] = serialized_request_body.media_type - - if http_headers is not None: - for header, value in http_headers.items(): - headers[header] = value - - timeout = timeout_ms / 1000 if timeout_ms is not None else None - - return client.build_request( - method, - url, - params=query_params, - content=serialized_request_body.content, - data=serialized_request_body.data, - 
files=serialized_request_body.files, - headers=headers, - timeout=timeout, - ) - - def do_request( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.client - logger = self.sdk_configuration.debug_logger - - hooks = self.sdk_configuration.__dict__["_hooks"] - - def do(): - http_res = None - try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = client.send(req, stream=stream) - except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") - - return http_res - - if retry_config is not None: - http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) - else: - http_res = do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) - - return http_res - - async def do_request_async( - 
self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.async_client - logger = self.sdk_configuration.debug_logger - - hooks = self.sdk_configuration.__dict__["_hooks"] - - async def do(): - http_res = None - try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = await client.send(req, stream=stream) - except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") - - return http_res - - if retry_config is not None: - http_res = await utils.retry_async( - do, utils.Retries(retry_config[0], retry_config[1]) - ) - else: - http_res = await do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) - - return http_res diff --git a/src/mistralai/batch.py b/src/mistralai/batch.py deleted file mode 100644 index 
bb59abda..00000000 --- a/src/mistralai/batch.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.mistral_jobs import MistralJobs - - -class Batch(BaseSDK): - jobs: MistralJobs - - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.jobs = MistralJobs(self.sdk_configuration) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py deleted file mode 100644 index 3408d943..00000000 --- a/src/mistralai/beta.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.conversations import Conversations -from mistralai.libraries import Libraries -from mistralai.mistral_agents import MistralAgents - - -class Beta(BaseSDK): - conversations: Conversations - r"""(beta) Conversations API""" - agents: MistralAgents - r"""(beta) Agents API""" - libraries: Libraries - r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" - - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.conversations = Conversations(self.sdk_configuration) - self.agents = MistralAgents(self.sdk_configuration) - self.libraries = Libraries(self.sdk_configuration) diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py deleted file mode 100644 index 1ed067e8..00000000 --- a/src/mistralai/chat.py +++ /dev/null @@ -1,806 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, List, Mapping, Optional, Union - -# region imports -from typing import Type -from mistralai.extra import ( - convert_to_parsed_chat_completion_response, - response_format_from_pydantic_model, - CustomPydanticModel, - ParsedChatCompletionResponse, -) -# endregion imports - - -class Chat(BaseSDK): - r"""Chat Completion API.""" - - # region sdk-class-body - # Custom .parse methods for the Structure Outputs Feature. - - def parse( - self, response_format: Type[CustomPydanticModel], **kwargs: Any - ) -> ParsedChatCompletionResponse[CustomPydanticModel]: - """ - Parse the response using the provided response format. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .complete method - :return: The parsed response - """ - # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete - json_response_format = response_format_from_pydantic_model(response_format) - # Run the inference - response = self.complete(**kwargs, response_format=json_response_format) - # Parse response back to the input pydantic model - parsed_response = convert_to_parsed_chat_completion_response( - response, response_format - ) - return parsed_response - - async def parse_async( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> ParsedChatCompletionResponse[CustomPydanticModel]: - """ - Asynchronously parse the response using the provided response format. 
- :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .complete method - :return: The parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = await self.complete_async( # pylint: disable=E1125 - **kwargs, response_format=json_response_format - ) - parsed_response = convert_to_parsed_chat_completion_response( - response, response_format - ) - return parsed_response - - def parse_stream( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> eventstreaming.EventStream[models.CompletionEvent]: - """ - Parse the response using the provided response format. - For now the response will be in JSON format not in the input Pydantic model. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .stream method - :return: The JSON parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = self.stream(**kwargs, response_format=json_response_format) - return response - - async def parse_stream_async( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - """ - Asynchronously parse the response using the provided response format. - For now the response will be in JSON format not in the input Pydantic model. 
- :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .stream method - :return: The JSON parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = await self.stream_async( # pylint: disable=E1125 - **kwargs, response_format=json_response_format - ) - return response - - # endregion sdk-class-body - - def complete( - self, - *, - model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Chat Completion - - :param model: ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def complete_async( - self, - *, - model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionRequestToolChoice, 
- models.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def stream( - self, - *, - model: str, - messages: Union[ - List[models.ChatCompletionStreamRequestMessages], - List[models.ChatCompletionStreamRequestMessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models.ChatCompletionStreamRequestStop, - models.ChatCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: 
OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == 
UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def stream_async( - self, - *, - model: str, - messages: Union[ - List[models.ChatCompletionStreamRequestMessages], - List[models.ChatCompletionStreamRequestMessagesTypedDict], - ], - temperature: 
OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models.ChatCompletionStreamRequestStop, - models.ChatCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = None, - tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
- :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. 
- :param prediction: - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - 
user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/client.py b/src/mistralai/client.py deleted file mode 100644 index d3582f77..00000000 --- a/src/mistralai/client.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Optional - -MIGRATION_MESSAGE = "This client is deprecated. To migrate to the new client, please refer to this guide: https://github.com/mistralai/client-python/blob/main/MIGRATION.md. If you need to use this client anyway, pin your version to 0.4.2." - - -class MistralClient: - def __init__( - self, - api_key: Optional[str] = None, - endpoint: str = "", - max_retries: int = 5, - timeout: int = 120, - ): - raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py new file mode 100644 index 00000000..4b79610a --- /dev/null +++ b/src/mistralai/client/__init__.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f1b791f9d2a5 + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/src/mistralai/client/_hooks/__init__.py b/src/mistralai/client/_hooks/__init__.py new file mode 100644 index 00000000..66a04e37 --- /dev/null +++ b/src/mistralai/client/_hooks/__init__.py @@ -0,0 +1,6 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cef9ff97efd7 + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/client/_hooks/custom_user_agent.py similarity index 100% rename from src/mistralai/_hooks/custom_user_agent.py rename to src/mistralai/client/_hooks/custom_user_agent.py diff --git a/src/mistralai/_hooks/deprecation_warning.py b/src/mistralai/client/_hooks/deprecation_warning.py similarity index 100% rename from src/mistralai/_hooks/deprecation_warning.py rename to src/mistralai/client/_hooks/deprecation_warning.py diff --git a/src/mistralai/client/_hooks/registration.py b/src/mistralai/client/_hooks/registration.py new file mode 100644 index 00000000..58bebab0 --- /dev/null +++ b/src/mistralai/client/_hooks/registration.py @@ -0,0 +1,22 @@ +from .custom_user_agent import CustomUserAgentHook +from .deprecation_warning import DeprecationWarningHook +from .tracing import TracingHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. 
+ + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + tracing_hook = TracingHook() + hooks.register_before_request_hook(CustomUserAgentHook()) + hooks.register_after_success_hook(DeprecationWarningHook()) + hooks.register_after_success_hook(tracing_hook) + hooks.register_before_request_hook(tracing_hook) + hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/client/_hooks/sdkhooks.py b/src/mistralai/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..ecf94240 --- /dev/null +++ b/src/mistralai/client/_hooks/sdkhooks.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ed1e485b2153 + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: 
AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/mistralai/client/_hooks/tracing.py b/src/mistralai/client/_hooks/tracing.py new file mode 100644 index 00000000..b353d9bd --- /dev/null +++ b/src/mistralai/client/_hooks/tracing.py @@ -0,0 +1,75 @@ +import logging +from typing import Optional, Tuple, Union + +import httpx +from opentelemetry.trace import Span + +from mistralai.extra.observability.otel import ( + get_or_create_otel_tracer, + get_response_and_error, + get_traced_request_and_span, + get_traced_response, +) +from .types import ( + AfterErrorContext, + AfterErrorHook, + AfterSuccessContext, + AfterSuccessHook, + BeforeRequestContext, + BeforeRequestHook, +) + +logger = logging.getLogger(__name__) + + +class TracingHook(BeforeRequestHook, AfterSuccessHook, AfterErrorHook): + def __init__(self) -> None: + 
self.tracing_enabled, self.tracer = get_or_create_otel_tracer() + self.request_span: Optional[Span] = None + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + # Refresh tracer/provider per request so tracing can be enabled if the + # application configures OpenTelemetry after the client is instantiated. + self.tracing_enabled, self.tracer = get_or_create_otel_tracer() + self.request_span = None + request, self.request_span = get_traced_request_and_span( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + request=request, + ) + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + response = get_traced_response( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + response=response, + ) + self.request_span = None + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + if response: + response, error = get_response_and_error( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + response=response, + error=error, + ) + self.request_span = None + return response, error diff --git a/src/mistralai/client/_hooks/types.py b/src/mistralai/client/_hooks/types.py new file mode 100644 index 00000000..036d44b8 --- /dev/null +++ b/src/mistralai/client/_hooks/types.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85cfedfb7582 + +from abc import ABC, abstractmethod +import httpx +from mistralai.client.httpclient import HttpClient +from mistralai.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> 
Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py new file mode 100644 index 00000000..ab2cf01d --- /dev/null +++ b/src/mistralai/client/_version.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cc807b30de19 + +import importlib.metadata + +__title__: str = "mistralai" +__version__: str = "2.0.0b1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/src/mistralai/accesses.py b/src/mistralai/client/accesses.py similarity index 79% rename from src/mistralai/accesses.py rename to src/mistralai/client/accesses.py index 67061b7e..0761b0bc 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/client/accesses.py @@ -1,14 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 76fc53bfcf59 from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional class Accesses(BaseSDK): + r"""(beta) Libraries API - manage access to a library.""" + def list( self, *, @@ -55,6 +59,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -71,7 +76,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -83,31 +88,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListSharingOut) + return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -155,6 +149,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -171,7 +166,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -183,40 +178,29 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListSharingOut) + return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def update_or_create( self, *, library_id: str, - org_id: str, level: models.ShareEnum, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -227,10 +211,10 @@ def update_or_create( Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. :param library_id: - :param org_id: :param level: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -272,6 +256,7 @@ def update_or_create( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_in, False, False, "json", models.SharingIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -288,7 +273,7 @@ def update_or_create( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -300,40 +285,29 @@ def update_or_create( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def update_or_create_async( self, *, library_id: str, - org_id: str, level: models.ShareEnum, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -344,10 +318,10 @@ async def update_or_create_async( Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. :param library_id: - :param org_id: :param level: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -389,6 +363,7 @@ async def update_or_create_async( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_in, False, False, "json", models.SharingIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -405,7 +380,7 @@ async def update_or_create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -417,39 +392,28 @@ async def update_or_create_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, *, library_id: str, - org_id: str, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -460,9 +424,9 @@ def delete( Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. :param library_id: - :param org_id: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -503,6 +467,7 @@ def delete( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_delete, False, False, "json", models.SharingDelete ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -519,7 +484,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -531,39 +496,28 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, 
"422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, *, library_id: str, - org_id: str, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -574,9 +528,9 @@ async def delete_async( Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. :param library_id: - :param org_id: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -617,6 +571,7 @@ async def delete_async( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_delete, False, False, "json", models.SharingDelete ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -633,7 +588,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -645,28 +600,17 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/agents.py b/src/mistralai/client/agents.py similarity index 76% rename from src/mistralai/agents.py rename to src/mistralai/client/agents.py index 48c06372..2b70d152 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/client/agents.py @@ -1,11 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e946546e3eaa from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, List, Mapping, Optional, Union +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union class Agents(BaseSDK): @@ -15,8 +17,8 @@ def complete( self, *, messages: Union[ - List[models.AgentsCompletionRequestMessages], - List[models.AgentsCompletionRequestMessagesTypedDict], + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, @@ -28,6 +30,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -61,13 +64,14 @@ def complete( :param 
stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -90,8 +94,9 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] + messages, List[models.AgentsCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -127,6 +132,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -143,7 +149,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -155,38 +161,27 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - 
response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, messages: Union[ - List[models.AgentsCompletionRequestMessages], - List[models.AgentsCompletionRequestMessagesTypedDict], + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, @@ -198,6 +193,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -231,13 +227,14 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -260,8 +257,9 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] + messages, List[models.AgentsCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -297,6 +295,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -313,7 +312,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -325,38 +324,27 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + 
response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, messages: Union[ - List[models.AgentsCompletionStreamRequestMessages], - List[models.AgentsCompletionStreamRequestMessagesTypedDict], + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, @@ -368,6 +356,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -403,13 +392,14 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. 
:param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -432,8 +422,9 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] + messages, List[models.AgentsCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -469,6 +460,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -485,7 +477,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -502,39 +494,30 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, messages: Union[ - List[models.AgentsCompletionStreamRequestMessages], - List[models.AgentsCompletionStreamRequestMessagesTypedDict], + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, @@ -546,6 +529,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -581,13 +565,14 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method @@ -610,8 +595,9 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] + messages, List[models.AgentsCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -647,6 +633,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -663,7 +650,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -680,29 +667,20 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, 
http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py new file mode 100644 index 00000000..f68f063c --- /dev/null +++ b/src/mistralai/client/audio.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7a8ed2e90d61 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.transcriptions import Transcriptions +from typing import Optional + +# region imports +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.extra.realtime import RealtimeTranscription +# endregion imports + + +class Audio(BaseSDK): + transcriptions: Transcriptions + r"""API for audio transcription.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.transcriptions = Transcriptions( + self.sdk_configuration, parent_ref=self.parent_ref + ) + + # region sdk-class-body + @property + def realtime(self) -> "RealtimeTranscription": + """Returns a client for real-time audio transcription via WebSocket.""" + if not hasattr(self, "_realtime"): + from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel + + self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init + + return self._realtime + 
+ # endregion sdk-class-body diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py new file mode 100644 index 00000000..a976121b --- /dev/null +++ b/src/mistralai/client/basesdk.py @@ -0,0 +1,385 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7518c67b81ea + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai.client import errors, models, utils +from mistralai.client._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) +from mistralai.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse + + +class BaseSDK: + sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. 
+ """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return 
self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + allow_empty_value, + ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). 
+ parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + security = utils.get_security_from_env(security, models.Security) + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody() + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: 
%s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + async def do(): + http_res = None + try: + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, 
+ req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise errors.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None + ) + + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise errors.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py new file mode 100644 index 00000000..7e36fd0d --- /dev/null +++ b/src/mistralai/client/batch.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cffe114c7ac7 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.batch_jobs import BatchJobs +from typing import Optional + + +class Batch(BaseSDK): + jobs: BatchJobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = BatchJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py new file mode 100644 index 00000000..0e135b30 --- /dev/null +++ b/src/mistralai/client/batch_jobs.py @@ -0,0 +1,795 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3423fec25840 + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class BatchJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, 
+ ) -> models.ListBatchJobsResponse: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param order_by: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + order_by=order_by, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListBatchJobsResponse: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param order_by: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + order_by=order_by, + ) + + req = self._build_request_async( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + endpoint: models.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job.
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateBatchJobRequest( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateBatchJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + endpoint: models.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference.
+ :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateBatchJobRequest( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateBatchJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + 
http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + 
job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request_async( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def cancel_async( + self, + *, + 
job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJob: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJob, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py new file mode 100644 index 00000000..65b761d1 --- /dev/null +++ b/src/mistralai/client/beta.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 981417f45147 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.beta_agents import BetaAgents +from mistralai.client.conversations import Conversations +from mistralai.client.libraries import Libraries +from typing import Optional + + +class Beta(BaseSDK): + conversations: Conversations + r"""(beta) Conversations API""" + agents: BetaAgents + r"""(beta) Agents API""" + libraries: Libraries + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.conversations = Conversations( + self.sdk_configuration, parent_ref=self.parent_ref + ) + self.agents = BetaAgents(self.sdk_configuration, parent_ref=self.parent_ref) + self.libraries = Libraries(self.sdk_configuration, 
parent_ref=self.parent_ref) diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py new file mode 100644 index 00000000..157c5de4 --- /dev/null +++ b/src/mistralai/client/beta_agents.py @@ -0,0 +1,2266 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b64ad29b7174 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class BetaAgents(BaseSDK): + r"""(beta) Agents API""" + + def create( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create an agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation.
+ :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.CreateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + version_message=version_message, + ) + + req = self._build_request( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateAgentRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", 
"500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create an agent that can be used within a conversation.
+ + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.CreateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + version_message=version_message, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, 
False, "json", models.CreateAgentRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: 
Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. + + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: Filter by agent name + :param search: Search agents by name or ID + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + search=search, + id=id, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. 
+ + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: Filter by agent name + :param search: Search agents by name or ID + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + search=search, + id=id, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security 
+ ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, 
"5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.UpdateAgentRequestTool], + List[models.UpdateAgentRequestToolTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param deployment_chat: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + update_agent_request=models.UpdateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.UpdateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, + version_message=version_message, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_agent_request, + False, + False, + "json", + models.UpdateAgentRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, 
+ ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.UpdateAgentRequestTool], + List[models.UpdateAgentRequestToolTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param deployment_chat: + :param metadata: + :param version_message: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + update_agent_request=models.UpdateAgentRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.UpdateAgentRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, + version_message=version_message, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_agent_request, + False, + False, + "json", + models.UpdateAgentRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", 
"503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list_versions( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. 
+ + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_versions_async( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get_version( + self, + *, + agent_id: str, + version: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_version_async( + self, + *, + agent_id: str, + version: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = 
(retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create_version_alias( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
+ + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_version_alias_async( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request_async( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list_version_aliases( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_version_aliases_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", 
"503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete_version_alias( + self, + *, + agent_id: str, + alias: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent version alias. + + Delete an existing alias for an agent. + + :param agent_id: + :param alias: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteAliasRequest( + agent_id=agent_id, + alias=alias, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_version_alias_async( + self, + *, + agent_id: str, + alias: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent version alias. + + Delete an existing alias for an agent. + + :param agent_id: + :param alias: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteAliasRequest( + agent_id=agent_id, + alias=alias, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + 
hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py new file mode 100644 index 00000000..13b9c01f --- /dev/null +++ b/src/mistralai/client/chat.py @@ -0,0 +1,807 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7eba0f088d47 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + +# region imports +from typing import Type + +from mistralai.extra.struct_chat import ( + ParsedChatCompletionResponse, + convert_to_parsed_chat_completion_response, +) +from mistralai.extra.utils.response_format import ( + CustomPydanticModel, + response_format_from_pydantic_model, +) +# endregion imports + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + # region sdk-class-body + # Custom .parse methods for the Structure Outputs Feature. + + def parse( + self, response_format: Type[CustomPydanticModel], **kwargs: Any + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Parse the response using the provided response format. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete + json_response_format = response_format_from_pydantic_model(response_format) + # Run the inference + response = self.complete(**kwargs, response_format=json_response_format) + # Parse response back to the input pydantic model + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + async def parse_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Asynchronously parse the response using the provided response format. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs: Additional keyword arguments to pass to the .complete_async method + :return: The parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.complete_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + def parse_stream( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStream[models.CompletionEvent]: + """ + Parse the response using the provided response format. + For now the response will be in JSON format, not in the input Pydantic model. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs: Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = self.stream(**kwargs, response_format=json_response_format) + return response + + async def parse_stream_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + """ + Asynchronously parse the response using the provided response format. + For now the response will be in JSON format, not in the input Pydantic model.
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs: Additional keyword arguments to pass to the .stream_async method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.stream_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + return response + + # endregion sdk-class-body + + def complete( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model
to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + 
http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, 
+ stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
+ :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + 
hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + 
models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
+ :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + 
hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + 
Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/classifiers.py b/src/mistralai/client/classifiers.py similarity index 76% rename from src/mistralai/classifiers.py rename to src/mistralai/client/classifiers.py index 6ebf7834..67199b60 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -1,11 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 26e773725732 from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import Any, Mapping, Optional, Union +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union class Classifiers(BaseSDK): @@ -19,6 +21,7 @@ def moderate( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -28,6 +31,7 @@ def moderate( :param model: ID of the model to use. :param inputs: Text to classify. 
+ :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -45,6 +49,7 @@ def moderate( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -64,6 +69,7 @@ def moderate( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -80,7 +86,7 @@ def moderate( config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -92,31 +98,20 @@ def moderate( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, 
http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def moderate_async( self, @@ -126,6 +121,7 @@ async def moderate_async( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -135,6 +131,7 @@ async def moderate_async( :param model: ID of the model to use. :param inputs: Text to classify. + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -152,6 +149,7 @@ async def moderate_async( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -171,6 +169,7 @@ async def moderate_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -187,7 +186,7 @@ async def moderate_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -199,38 +198,27 @@ async def moderate_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if 
utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def moderate_chat( self, *, inputs: Union[ - models.ChatModerationRequestInputs, - models.ChatModerationRequestInputsTypedDict, + models.ChatModerationRequestInputs3, + models.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -258,7 +246,9 @@ def moderate_chat( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + inputs=utils.get_pydantic_model( + inputs, models.ChatModerationRequestInputs3 + ), model=model, ) @@ -278,6 +268,7 @@ def moderate_chat( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", 
models.ChatModerationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -294,7 +285,7 @@ def moderate_chat( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -306,38 +297,27 @@ def moderate_chat( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def moderate_chat_async( self, *, inputs: Union[ - models.ChatModerationRequestInputs, - models.ChatModerationRequestInputsTypedDict, 
+ models.ChatModerationRequestInputs3, + models.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -365,7 +345,9 @@ async def moderate_chat_async( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + inputs=utils.get_pydantic_model( + inputs, models.ChatModerationRequestInputs3 + ), model=model, ) @@ -385,6 +367,7 @@ async def moderate_chat_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatModerationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -401,7 +384,7 @@ async def moderate_chat_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -413,31 +396,20 @@ async def moderate_chat_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def classify( self, @@ -447,6 +419,7 @@ def classify( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -456,6 +429,7 @@ def classify( :param model: ID of the model to use. :param inputs: Text to classify. + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -473,6 +447,7 @@ def classify( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -492,6 +467,7 @@ def classify( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -508,7 +484,7 @@ def classify( config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -520,31 +496,20 @@ def classify( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return 
utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def classify_async( self, @@ -554,6 +519,7 @@ async def classify_async( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -563,6 +529,7 @@ async def classify_async( :param model: ID of the model to use. :param inputs: Text to classify. 
+ :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -580,6 +547,7 @@ async def classify_async( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -599,6 +567,7 @@ async def classify_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -615,7 +584,7 @@ async def classify_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -627,37 +596,26 @@ async def classify_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, 
http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def classify_chat( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + input: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -666,7 +624,7 @@ def classify_chat( r"""Chat Classifications :param model: - :param inputs: Chat to classify + :param input: Chat to classify :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -684,7 +642,7 @@ def classify_chat( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), + input=utils.get_pydantic_model(input, models.Inputs), ) req = self._build_request( @@ -703,6 +661,7 @@ def classify_chat( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -719,7 +678,7 @@ def classify_chat( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -731,37 +690,26 @@ def classify_chat( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return 
utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def classify_chat_async( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + input: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -770,7 +718,7 @@ async def classify_chat_async( r"""Chat Classifications :param model: - :param inputs: Chat to classify + :param input: Chat to classify :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout 
configuration for this method in milliseconds @@ -788,7 +736,7 @@ async def classify_chat_async( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), + input=utils.get_pydantic_model(input, models.Inputs), ) req = self._build_request_async( @@ -807,6 +755,7 @@ async def classify_chat_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -823,7 +772,7 @@ async def classify_chat_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -835,28 +784,17 @@ async def classify_chat_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py new file mode 100644 index 00000000..ec33b1fb --- /dev/null +++ b/src/mistralai/client/conversations.py @@ -0,0 +1,2864 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 40692a878064 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + +# region imports +import typing +from typing import AsyncGenerator +import logging +from collections import defaultdict + +from mistralai.client.models import ( + ResponseStartedEvent, + ConversationEventsData, + InputEntries, +) +from mistralai.extra.run.result import ( + RunResult, + RunResultEvents, + FunctionResultEvent, + reconstitue_entries, +) +from mistralai.extra.run.utils import run_requirements +from mistralai.extra.observability.otel import GenAISpanEnum, get_or_create_otel_tracer + +logger = logging.getLogger(__name__) +tracing_enabled, tracer = get_or_create_otel_tracer() + +if typing.TYPE_CHECKING: + from mistralai.extra.run.context import RunContext + +# endregion imports + + +class Conversations(BaseSDK): + r"""(beta) Conversations API""" + + # region sdk-class-body + # Custom run code allowing client side 
execution of code + + @run_requirements + async def run_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RunResult: + """Run a conversation with the given inputs and context. + + The execution of a run will only stop when no required local execution can be done.""" + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel + + with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + run_result.conversation_id = res.conversation_id + run_ctx.conversation_id = res.conversation_id + logger.info( # pylint: disable=logging-fstring-interpolation + 
f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) + return run_result + + @run_requirements + async def run_stream_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + """Similar to `run_async` but returns a generator which streams events. 
+ + The last streamed object is the RunResult object which summarises what happened in the run.""" + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + async def run_generator() -> ( + AsyncGenerator[Union[RunResultEvents, RunResult], None] + ): + current_entries = input_entries + while True: + received_event_tracker: defaultdict[ + int, list[ConversationEventsData] + ] = defaultdict(list) + if run_ctx.conversation_id is None: + res = await self.start_stream_async( + inputs=current_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + else: + res = await self.append_stream_async( + conversation_id=run_ctx.conversation_id, + inputs=current_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + async for event in res: + if ( + isinstance(event.data, ResponseStartedEvent) + and run_ctx.conversation_id is None + ): + run_result.conversation_id = event.data.conversation_id + run_ctx.conversation_id = event.data.conversation_id + logger.info( # pylint: disable=logging-fstring-interpolation + f"Started Run with conversation with id {run_ctx.conversation_id}" + ) + if ( + output_index := getattr(event.data, "output_index", None) + ) is not None: + received_event_tracker[output_index].append(event.data) + yield typing.cast(RunResultEvents, event) + run_ctx.request_count += 1 + outputs = reconstitue_entries(received_event_tracker) + run_result.output_entries.extend(outputs) + 
fcalls = get_function_calls(outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) + yield run_result + + return run_generator() + + # endregion sdk-class-body + + def start( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. 
Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + 
get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def start_async( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + 
List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + 
retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentsAPIV1ConversationsListResponse]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentsAPIV1ConversationsListResponse]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): 
+ retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def append( + self, + *, + conversation_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def append_async( + self, + *, + conversation_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. 
+ :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries 
== UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get_history( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. 
+ + :param conversation_id: ID of the conversation from which we are fetching entries. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.ConversationHistory, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_history_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + + :param conversation_id: ID of the conversation from which we are fetching entries. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationHistory, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get_messages( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. + + :param conversation_id: ID of the conversation from which we are fetching messages. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationMessages, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_messages_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. + + :param conversation_id: ID of the conversation from which we are fetching messages. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationMessages, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API 
error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def restart( + self, + *, + conversation_id: str, + from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRestartRequestAgentVersion, + models.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param from_entry_id: + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = 
self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def restart_async( + self, + *, + conversation_id: str, + from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRestartRequestAgentVersion, + models.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: 
Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param from_entry_id: + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def start_stream( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.ConversationStreamRequestTool], + List[models.ConversationStreamRequestToolTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationStreamRequestAgentVersion, + models.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + 
http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], 
+ stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def start_stream_async( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.ConversationStreamRequestTool], + List[models.ConversationStreamRequestToolTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationStreamRequestAgentVersion, + models.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = 
UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTool]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def append_stream( + self, + *, + conversation_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Append new entries to an 
existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + 
get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def append_stream_async( + self, + *, + conversation_id: str, + inputs: Optional[ + 
Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ + Union[ + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + 
self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + def restart_stream( + self, + *, + conversation_id: str, + from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = 
None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param from_entry_id: + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def restart_stream_async( + self, + *, + conversation_id: str, + from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> 
eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param from_entry_id: + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security 
+ ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/documents.py b/src/mistralai/client/documents.py similarity index 77% rename from src/mistralai/documents.py rename to src/mistralai/client/documents.py index e43d3faf..b3130364 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/client/documents.py @@ -1,14 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bcc17286c31c from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import Any, Mapping, Optional, Union +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union class Documents(BaseSDK): + r"""(beta) Libraries API - manage documents in a library.""" + def list( self, *, @@ -16,14 +20,15 @@ def list( search: OptionalNullable[str] = UNSET, page_size: Optional[int] = 100, page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, sort_by: Optional[str] = "created_at", sort_order: Optional[str] = "desc", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: - r"""List document in a given library. + ) -> models.ListDocumentsResponse: + r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -31,6 +36,7 @@ def list( :param search: :param page_size: :param page: + :param filters_attributes: :param sort_by: :param sort_order: :param retries: Override the default retry configuration for this method @@ -53,6 +59,7 @@ def list( search=search, page_size=page_size, page=page, + filters_attributes=filters_attributes, sort_by=sort_by, sort_order=sort_order, ) @@ -70,6 +77,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -86,7 +94,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -98,31 +106,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListDocumentOut) + return unmarshal_json_response(models.ListDocumentsResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = 
http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -131,14 +128,15 @@ async def list_async( search: OptionalNullable[str] = UNSET, page_size: Optional[int] = 100, page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, sort_by: Optional[str] = "created_at", sort_order: Optional[str] = "desc", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: - r"""List document in a given library. + ) -> models.ListDocumentsResponse: + r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -146,6 +144,7 @@ async def list_async( :param search: :param page_size: :param page: + :param filters_attributes: :param sort_by: :param sort_order: :param retries: Override the default retry configuration for this method @@ -168,6 +167,7 @@ async def list_async( search=search, page_size=page_size, page=page, + filters_attributes=filters_attributes, sort_by=sort_by, sort_order=sort_order, ) @@ -185,6 +185,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -201,7 +202,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -213,31 +214,20 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListDocumentOut) + return unmarshal_json_response(models.ListDocumentsResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def upload( self, @@ -248,13 +238,21 @@ def upload( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Upload a new document. Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search :param library_id: - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -272,7 +270,7 @@ def upload( request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), ), ) @@ -291,12 +289,9 @@ def upload( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, + request.request_body, False, False, "multipart", models.DocumentUpload ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -313,7 +308,7 @@ def upload( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_upload_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -325,31 +320,20 @@ def upload( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def upload_async( self, @@ -360,13 +344,21 @@ async def upload_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Upload a new document. Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search :param library_id: - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -384,7 +376,7 @@ async def upload_async( request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), ), ) @@ -403,12 +395,9 @@ async def upload_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, + request.request_body, False, False, "multipart", models.DocumentUpload ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -425,7 +414,7 @@ async def upload_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_upload_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -437,31 +426,20 @@ async def upload_async( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise 
models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -472,7 +450,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Retrieve the metadata of a specific document. Given a library and a document in this library, you can retrieve the metadata of that document. 
@@ -512,6 +490,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -528,7 +507,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -540,31 +519,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -575,7 +543,7 @@ async def get_async( server_url: 
Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Retrieve the metadata of a specific document. Given a library and a document in this library, you can retrieve the metadata of that document. @@ -615,6 +583,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -631,7 +600,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -643,31 +612,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -675,11 +633,14 @@ def update( library_id: str, document_id: str, name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Update the metadata of a specific document. Given a library and a document in that library, update the name of that document. @@ -687,6 +648,7 @@ def update( :param library_id: :param document_id: :param name: + :param attributes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -705,8 +667,9 @@ def update( request = models.LibrariesDocumentsUpdateV1Request( library_id=library_id, document_id=document_id, - document_update_in=models.DocumentUpdateIn( + update_document_request=models.UpdateDocumentRequest( name=name, + attributes=attributes, ), ) @@ -724,12 +687,13 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, + request.update_document_request, False, False, "json", - models.DocumentUpdateIn, + models.UpdateDocumentRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -746,7 +710,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", 
operation_id="libraries_documents_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -758,31 +722,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -790,11 +743,14 @@ async def update_async( library_id: str, document_id: str, name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = 
None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Update the metadata of a specific document. Given a library and a document in that library, update the name of that document. @@ -802,6 +758,7 @@ async def update_async( :param library_id: :param document_id: :param name: + :param attributes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -820,8 +777,9 @@ async def update_async( request = models.LibrariesDocumentsUpdateV1Request( library_id=library_id, document_id=document_id, - document_update_in=models.DocumentUpdateIn( + update_document_request=models.UpdateDocumentRequest( name=name, + attributes=attributes, ), ) @@ -839,12 +797,13 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, + request.update_document_request, False, False, "json", - models.DocumentUpdateIn, + models.UpdateDocumentRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -861,7 +820,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -873,31 +832,20 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -948,6 +896,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -964,7 +913,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -978,29 +927,18 @@ def delete( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, 
http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -1051,6 +989,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1067,7 +1006,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1081,29 +1020,18 @@ async def delete_async( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error 
occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def text_content( self, @@ -1154,6 +1082,7 @@ def text_content( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1170,7 +1099,7 @@ def text_content( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1182,31 +1111,20 @@ def text_content( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentTextContent) + return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def text_content_async( self, @@ -1257,6 +1175,7 @@ async def text_content_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1273,7 +1192,7 @@ async def text_content_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1285,31 +1204,20 @@ async def text_content_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentTextContent) + return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def status( self, @@ -1360,6 +1268,7 @@ def status( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1376,7 +1285,7 @@ def status( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_status_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1388,31 +1297,20 @@ def status( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ProcessingStatusOut) + return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, 
"4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def status_async( self, @@ -1463,6 +1361,7 @@ async def status_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1479,7 +1378,7 @@ async def status_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_status_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1491,31 +1390,20 @@ async def status_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ProcessingStatusOut) + return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def get_signed_url( self, @@ -1566,6 +1454,7 @@ def get_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1582,7 +1471,7 @@ def get_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1594,31 +1483,20 @@ def get_signed_url( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1669,6 +1547,7 @@ async def get_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1685,7 +1564,7 @@ async def get_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1697,31 +1576,20 @@ async def get_signed_url_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, 
http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def extracted_text_signed_url( self, @@ -1772,6 +1640,7 @@ def extracted_text_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1788,7 +1657,7 @@ def extracted_text_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1800,31 +1669,20 @@ def extracted_text_signed_url( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise 
models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def extracted_text_signed_url_async( self, @@ -1875,6 +1733,7 @@ async def extracted_text_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1891,7 +1750,7 @@ async def extracted_text_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1903,31 +1762,20 @@ async def extracted_text_signed_url_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def reprocess( self, @@ -1978,6 +1826,7 @@ def reprocess( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1994,7 +1843,7 @@ def reprocess( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2008,29 +1857,18 @@ def reprocess( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def reprocess_async( self, @@ -2081,6 +1919,7 @@ async def reprocess_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2097,7 +1936,7 @@ async def reprocess_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2111,26 +1950,15 @@ async def reprocess_async( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py new file mode 100644 index 00000000..5f9d3b9c --- /dev/null +++ b/src/mistralai/client/embeddings.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f9c17258207e + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Embeddings(BaseSDK): + r"""Embeddings API.""" + + def create( + self, + *, + model: str, + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. + :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise 
errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. + :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request_async( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise 
errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/errors/__init__.py b/src/mistralai/client/errors/__init__.py new file mode 100644 index 00000000..58a591a1 --- /dev/null +++ b/src/mistralai/client/errors/__init__.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0b2db51246df + +from .mistralerror import MistralError +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/errors/httpvalidationerror.py 
b/src/mistralai/client/errors/httpvalidationerror.py new file mode 100644 index 00000000..97b16562 --- /dev/null +++ b/src/mistralai/client/errors/httpvalidationerror.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ac3de4a52bb6 + +from __future__ import annotations +from dataclasses import dataclass, field +import httpx +from mistralai.client.errors import MistralError +from mistralai.client.models import validationerror as models_validationerror +from mistralai.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[models_validationerror.ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/src/mistralai/client/errors/mistralerror.py b/src/mistralai/client/errors/mistralerror.py new file mode 100644 index 00000000..eb73040c --- /dev/null +++ b/src/mistralai/client/errors/mistralerror.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d1f57f0ff1e9 + +import httpx +from typing import Optional +from dataclasses import dataclass, field + + +@dataclass(unsafe_hash=True) +class MistralError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) + + def __str__(self): + return self.message diff --git a/src/mistralai/client/errors/no_response_error.py b/src/mistralai/client/errors/no_response_error.py new file mode 100644 index 00000000..d71dfa7b --- /dev/null +++ b/src/mistralai/client/errors/no_response_error.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8b469ecb0906 + +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + object.__setattr__(self, "message", message) + super().__init__(message) + + def __str__(self): + return self.message diff --git a/src/mistralai/client/errors/responsevalidationerror.py b/src/mistralai/client/errors/responsevalidationerror.py new file mode 100644 index 00000000..a7b3b9f0 --- /dev/null +++ b/src/mistralai/client/errors/responsevalidationerror.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6cfaa3147abe + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.client.errors import MistralError + + +@dataclass(unsafe_hash=True) +class ResponseValidationError(MistralError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/src/mistralai/client/errors/sdkerror.py b/src/mistralai/client/errors/sdkerror.py new file mode 100644 index 00000000..25b87255 --- /dev/null +++ b/src/mistralai/client/errors/sdkerror.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c489ffe1e9ca + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.client.errors import MistralError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + + if message: + message += ": " + message += f"Status {raw_response.status_code}" + + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" + + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and 
{remaining} more chars" + + message += f". Body: {body_display}" + message = message.strip() + + super().__init__(message, raw_response, body) diff --git a/src/mistralai/files.py b/src/mistralai/client/files.py similarity index 78% rename from src/mistralai/files.py rename to src/mistralai/client/files.py index 05739eeb..a5f3adf6 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/client/files.py @@ -1,11 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f12df4b2ce43 from .basesdk import BaseSDK import httpx -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import List, Mapping, Optional, Union @@ -21,7 +23,7 @@ def upload( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: + ) -> models.CreateFileResponse: r"""Upload File Upload a file that can be used across various endpoints. @@ -30,7 +32,15 @@ def upload( Please contact us if you need to increase these storage limits. - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -47,7 +57,7 @@ def upload( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + request = models.MultiPartBodyParams( purpose=purpose, file=utils.get_pydantic_model(file, models.File), ) @@ -66,12 +76,9 @@ def upload( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, + request, False, False, "multipart", models.MultiPartBodyParams ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -88,7 +95,7 @@ def upload( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -99,26 +106,15 @@ def upload( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UploadFileOut) + return unmarshal_json_response(models.CreateFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, 
http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def upload_async( self, @@ -129,7 +125,7 @@ async def upload_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: + ) -> models.CreateFileResponse: r"""Upload File Upload a file that can be used across various endpoints. @@ -138,7 +134,15 @@ async def upload_async( Please contact us if you need to increase these storage limits. - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -155,7 +159,7 @@ async def upload_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + request = models.MultiPartBodyParams( purpose=purpose, file=utils.get_pydantic_model(file, models.File), ) @@ -174,12 +178,9 @@ async def upload_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, + request, False, False, "multipart", models.MultiPartBodyParams ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -196,7 +197,7 @@ async def upload_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -207,51 +208,44 @@ async def upload_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UploadFileOut) + return unmarshal_json_response(models.CreateFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - 
raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def list( self, *, page: Optional[int] = 0, page_size: Optional[int] = 100, + include_total: Optional[bool] = True, sample_type: OptionalNullable[List[models.SampleType]] = UNSET, source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, purpose: OptionalNullable[models.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: + ) -> models.ListFilesResponse: r"""List Files Returns a list of files that belong to the user's organization. 
:param page: :param page_size: + :param include_total: :param sample_type: :param source: :param search: :param purpose: + :param mimetypes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -270,10 +264,12 @@ def list( request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, + include_total=include_total, sample_type=sample_type, source=source, search=search, purpose=purpose, + mimetypes=mimetypes, ) req = self._build_request( @@ -289,6 +285,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -305,7 +302,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -316,51 +313,44 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListFilesOut) + return unmarshal_json_response(models.ListFilesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - 
f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, *, page: Optional[int] = 0, page_size: Optional[int] = 100, + include_total: Optional[bool] = True, sample_type: OptionalNullable[List[models.SampleType]] = UNSET, source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, purpose: OptionalNullable[models.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: + ) -> models.ListFilesResponse: r"""List Files Returns a list of files that belong to the user's organization. :param page: :param page_size: + :param include_total: :param sample_type: :param source: :param search: :param purpose: + :param mimetypes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -379,10 +369,12 @@ async def list_async( request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, + include_total=include_total, sample_type=sample_type, source=source, search=search, purpose=purpose, + mimetypes=mimetypes, ) req = self._build_request_async( @@ -398,6 +390,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -414,7 +407,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", - oauth2_scopes=[], + oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -425,26 +418,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListFilesOut) + return unmarshal_json_response(models.ListFilesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -454,7 +436,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: + ) -> models.GetFileResponse: r"""Retrieve File Returns information about a specific file. 
@@ -492,6 +474,7 @@ def retrieve( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -508,7 +491,7 @@ def retrieve( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -519,26 +502,15 @@ def retrieve( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.RetrieveFileOut) + return unmarshal_json_response(models.GetFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -548,7 +520,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: + ) -> models.GetFileResponse: r"""Retrieve File Returns information about a specific file. 
@@ -586,6 +558,7 @@ async def retrieve_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -602,7 +575,7 @@ async def retrieve_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -613,26 +586,15 @@ async def retrieve_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.RetrieveFileOut) + return unmarshal_json_response(models.GetFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -642,7 +604,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: + ) -> models.DeleteFileResponse: r"""Delete File Delete a file. 
@@ -680,6 +642,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -696,7 +659,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -707,26 +670,15 @@ def delete( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteFileOut) + return unmarshal_json_response(models.DeleteFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -736,7 +688,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: + ) -> models.DeleteFileResponse: r"""Delete File Delete a file. 
@@ -774,6 +726,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -790,7 +743,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -801,26 +754,15 @@ async def delete_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteFileOut) + return unmarshal_json_response(models.DeleteFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def download( self, @@ -868,6 +810,7 @@ def download( accept_header_value="application/octet-stream", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -884,7 +827,7 @@ def download( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", - 
oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -899,23 +842,13 @@ def download( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def download_async( self, @@ -963,6 +896,7 @@ async def download_async( accept_header_value="application/octet-stream", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -979,7 +913,7 @@ async def download_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -994,23 +928,13 @@ async def download_async( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def get_signed_url( self, @@ -1021,7 +945,7 @@ def get_signed_url( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: + ) -> models.GetSignedURLResponse: r"""Get Signed Url :param file_id: @@ -1059,6 +983,7 @@ def get_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1075,7 +1000,7 @@ def get_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1086,26 +1011,15 @@ def get_signed_url( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FileSignedURL) + return unmarshal_json_response(models.GetSignedURLResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1116,7 +1030,7 @@ async def get_signed_url_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: + ) -> models.GetSignedURLResponse: r"""Get Signed Url :param file_id: @@ -1154,6 +1068,7 @@ async def get_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1170,7 +1085,7 @@ async def get_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1181,23 +1096,12 @@ async def get_signed_url_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FileSignedURL) + return unmarshal_json_response(models.GetSignedURLResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, 
http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py new file mode 100644 index 00000000..8ffb7730 --- /dev/null +++ b/src/mistralai/client/fim.py @@ -0,0 +1,542 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 217bea5d701d + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + def complete( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim 
Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", 
"4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if 
utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + 
client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res, http_res_text + ) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py new file mode 100644 index 00000000..df6bc564 --- /dev/null +++ b/src/mistralai/client/fine_tuning.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5d5079bbd54e + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.fine_tuning_jobs import FineTuningJobs +from typing import Optional + + +class FineTuning(BaseSDK): + jobs: FineTuningJobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = FineTuningJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/fine_tuning_jobs.py b/src/mistralai/client/fine_tuning_jobs.py new file mode 100644 index 00000000..c2ee871b --- /dev/null +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -0,0 +1,1045 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fa1ea246e0b2 + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class FineTuningJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + 
server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFineTuningJobsResponse: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, 
"5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFineTuningJobsResponse: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if 
utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + model: str, + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], + ] + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. 
+ + :param model: + :param hyperparameters: + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateFineTuningJobRequest( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] + ), + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateFineTuningJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ + Union[ + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], + ] + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: + :param hyperparameters: + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateFineTuningJobRequest( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateFineTuningJobRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response 
received", http_res) + + async def get_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected 
response received", http_res) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def start( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected 
response received", http_res) + + async def start_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/httpclient.py b/src/mistralai/client/httpclient.py new file mode 100644 index 00000000..544af7f8 --- /dev/null +++ b/src/mistralai/client/httpclient.py @@ -0,0 +1,126 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3e46bde74327 + +# pyright: reportReturnType = false +import asyncio +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + def close(self) -> None: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: 
Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + async def aclose(self) -> None: + pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + try: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) + except RuntimeError: + try: + asyncio.run(async_client.aclose()) + except RuntimeError: + # best effort + pass diff --git a/src/mistralai/libraries.py b/src/mistralai/client/libraries.py similarity index 75% rename from src/mistralai/libraries.py rename to src/mistralai/client/libraries.py index 45bf0397..b8728362 100644 --- a/src/mistralai/libraries.py +++ b/src/mistralai/client/libraries.py @@ -1,30 +1,36 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d43a5f78045f from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.accesses import Accesses -from mistralai.documents import Documents -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.accesses import Accesses +from mistralai.client.documents import Documents +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional class Libraries(BaseSDK): - r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" documents: Documents + r"""(beta) Libraries API - manage documents in a library.""" accesses: Accesses + r"""(beta) Libraries API - manage access to a library.""" - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) self.sdk_configuration = sdk_config self._init_sdks() def _init_sdks(self): - self.documents = Documents(self.sdk_configuration) - self.accesses = Accesses(self.sdk_configuration) + self.documents = Documents(self.sdk_configuration, parent_ref=self.parent_ref) + self.accesses = Accesses(self.sdk_configuration, parent_ref=self.parent_ref) def list( self, @@ -33,7 +39,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> 
models.ListLibraryOut: + ) -> models.ListLibrariesResponse: r"""List all libraries you have access to. List all libraries that you have created or have been shared with you. @@ -65,6 +71,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -81,7 +88,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -92,26 +99,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListLibraryOut) + return unmarshal_json_response(models.ListLibrariesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -120,7 +116,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: + ) -> models.ListLibrariesResponse: r"""List all libraries you 
have access to. List all libraries that you have created or have been shared with you. @@ -152,6 +148,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -168,7 +165,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -179,26 +176,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListLibraryOut) + return unmarshal_json_response(models.ListLibrariesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def create( self, @@ -210,7 +196,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Create a new Library. 
Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. @@ -233,7 +219,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.LibraryIn( + request = models.CreateLibraryRequest( name=name, description=description, chunk_size=chunk_size, @@ -253,8 +239,9 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn + request, False, False, "json", models.CreateLibraryRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -271,7 +258,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -283,31 +270,20 @@ def create( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, 
http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -319,7 +295,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Create a new Library. Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. @@ -342,7 +318,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibraryIn( + request = models.CreateLibraryRequest( name=name, description=description, chunk_size=chunk_size, @@ -362,8 +338,9 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn + request, False, False, "json", models.CreateLibraryRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -380,7 +357,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -392,31 +369,20 @@ async def create_async( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if 
utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -426,7 +392,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Detailed information about a specific Library. Given a library id, details information about that Library. 
@@ -464,6 +430,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -480,7 +447,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -492,31 +459,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -526,7 +482,7 @@ async def get_async( server_url: Optional[str] = 
None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Detailed information about a specific Library. Given a library id, details information about that Library. @@ -564,6 +520,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -580,7 +537,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -592,31 +549,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -626,7 +572,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Delete a library and all of it's document. Given a library id, deletes it together with all documents that have been uploaded to that library. @@ -664,6 +610,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -680,7 +627,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -692,31 +639,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - 
raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -726,7 +662,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Delete a library and all of it's document. Given a library id, deletes it together with all documents that have been uploaded to that library. @@ -764,6 +700,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -780,7 +717,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -792,31 +729,20 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -828,7 +754,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Update a library. Given a library id, you can update the name and description. 
@@ -853,7 +779,7 @@ def update( request = models.LibrariesUpdateV1Request( library_id=library_id, - library_in_update=models.LibraryInUpdate( + update_library_request=models.UpdateLibraryRequest( name=name, description=description, ), @@ -873,8 +799,13 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate + request.update_library_request, + False, + False, + "json", + models.UpdateLibraryRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -891,7 +822,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -903,31 +834,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) 
- content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -939,7 +859,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Update a library. Given a library id, you can update the name and description. @@ -964,7 +884,7 @@ async def update_async( request = models.LibrariesUpdateV1Request( library_id=library_id, - library_in_update=models.LibraryInUpdate( + update_library_request=models.UpdateLibraryRequest( name=name, description=description, ), @@ -984,8 +904,13 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate + request.update_library_request, + False, + False, + "json", + models.UpdateLibraryRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1002,7 +927,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1014,28 +939,17 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py new file mode 100644 index 00000000..5ef8b3f3 --- /dev/null +++ b/src/mistralai/client/models/__init__.py @@ -0,0 +1,2369 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e0e8dad92725 + +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .agent import ( + Agent, + AgentTool, + AgentToolTypedDict, + AgentTypedDict, + UnknownAgentTool, + ) + from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict + from .agentconversation import ( + AgentConversation, + AgentConversationAgentVersion, + AgentConversationAgentVersionTypedDict, + AgentConversationTypedDict, + ) + from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventTypedDict, + ) + from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict + from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, + ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) + from .agents_api_v1_agents_delete_aliasop import ( + AgentsAPIV1AgentsDeleteAliasRequest, + AgentsAPIV1AgentsDeleteAliasRequestTypedDict, + ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetAgentVersion, + AgentsAPIV1AgentsGetAgentVersionTypedDict, + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, + ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + 
AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, + ) + from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + AgentsAPIV1ConversationsListResponse, + AgentsAPIV1ConversationsListResponseTypedDict, + ) + from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) + from .agentscompletionrequest import ( + AgentsCompletionRequest, + AgentsCompletionRequestMessage, + 
AgentsCompletionRequestMessageTypedDict, + AgentsCompletionRequestStop, + AgentsCompletionRequestStopTypedDict, + AgentsCompletionRequestToolChoice, + AgentsCompletionRequestToolChoiceTypedDict, + AgentsCompletionRequestTypedDict, + ) + from .agentscompletionstreamrequest import ( + AgentsCompletionStreamRequest, + AgentsCompletionStreamRequestMessage, + AgentsCompletionStreamRequestMessageTypedDict, + AgentsCompletionStreamRequestStop, + AgentsCompletionStreamRequestStopTypedDict, + AgentsCompletionStreamRequestToolChoice, + AgentsCompletionStreamRequestToolChoiceTypedDict, + AgentsCompletionStreamRequestTypedDict, + ) + from .apiendpoint import APIEndpoint + from .archivemodelresponse import ( + ArchiveModelResponse, + ArchiveModelResponseTypedDict, + ) + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageTypedDict, + ) + from .audiochunk import AudioChunk, AudioChunkTypedDict + from .audioencoding import AudioEncoding + from .audioformat import AudioFormat, AudioFormatTypedDict + from .audiotranscriptionrequest import ( + AudioTranscriptionRequest, + AudioTranscriptionRequestTypedDict, + ) + from .audiotranscriptionrequeststream import ( + AudioTranscriptionRequestStream, + AudioTranscriptionRequestStreamTypedDict, + ) + from .basemodelcard import BaseModelCard, BaseModelCardTypedDict + from .batcherror import BatchError, BatchErrorTypedDict + from .batchjob import BatchJob, BatchJobTypedDict + from .batchjobstatus import BatchJobStatus + from .batchrequest import BatchRequest, BatchRequestTypedDict + from .builtinconnectors import BuiltInConnectors + from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + 
ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs1, + ChatModerationRequestInputs1TypedDict, + ChatModerationRequestInputs2, + ChatModerationRequestInputs2TypedDict, + ChatModerationRequestInputs3, + ChatModerationRequestInputs3TypedDict, + ChatModerationRequestTypedDict, + ) + from .checkpoint import Checkpoint, CheckpointTypedDict + from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, + ) + from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, + ) + from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, + ) + from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, + ) + from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobIntegration, + ClassifierFineTuningJobIntegrationTypedDict, + ClassifierFineTuningJobStatus, + ClassifierFineTuningJobTypedDict, + UnknownClassifierFineTuningJobIntegration, + ) + from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + 
ClassifierFineTuningJobDetailsIntegration, + ClassifierFineTuningJobDetailsIntegrationTypedDict, + ClassifierFineTuningJobDetailsStatus, + ClassifierFineTuningJobDetailsTypedDict, + UnknownClassifierFineTuningJobDetailsIntegration, + ) + from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict + from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, + ) + from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, + ) + from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict + from .completionargs import CompletionArgs, CompletionArgsTypedDict + from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, + ) + from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobIntegration, + CompletionFineTuningJobIntegrationTypedDict, + CompletionFineTuningJobRepository, + CompletionFineTuningJobRepositoryTypedDict, + CompletionFineTuningJobStatus, + CompletionFineTuningJobTypedDict, + UnknownCompletionFineTuningJobIntegration, + UnknownCompletionFineTuningJobRepository, + ) + from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsIntegration, + CompletionFineTuningJobDetailsIntegrationTypedDict, + CompletionFineTuningJobDetailsRepository, + CompletionFineTuningJobDetailsRepositoryTypedDict, + CompletionFineTuningJobDetailsStatus, + CompletionFineTuningJobDetailsTypedDict, + UnknownCompletionFineTuningJobDetailsIntegration, + UnknownCompletionFineTuningJobDetailsRepository, + ) + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + 
CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk + from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, + ) + from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, + ) + from .conversationevents import ( + ConversationEvents, + ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, + UnknownConversationEventsData, + ) + from .conversationhistory import ( + ConversationHistory, + ConversationHistoryTypedDict, + Entry, + EntryTypedDict, + ) + from .conversationinputs import ConversationInputs, ConversationInputsTypedDict + from .conversationmessages import ( + ConversationMessages, + ConversationMessagesTypedDict, + ) + from .conversationrequest import ( + ConversationRequest, + ConversationRequestAgentVersion, + ConversationRequestAgentVersionTypedDict, + ConversationRequestHandoffExecution, + ConversationRequestTool, + ConversationRequestToolTypedDict, + ConversationRequestTypedDict, + ) + from .conversationresponse import ( + ConversationResponse, + ConversationResponseTypedDict, + Output, + OutputTypedDict, + ) + from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestAgentVersion, + ConversationRestartRequestAgentVersionTypedDict, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, + ) + from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestAgentVersion, + ConversationRestartStreamRequestAgentVersionTypedDict, + 
ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, + ) + from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestAgentVersion, + ConversationStreamRequestAgentVersionTypedDict, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTool, + ConversationStreamRequestToolTypedDict, + ConversationStreamRequestTypedDict, + ) + from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkThinking, + ConversationThinkChunkThinkingTypedDict, + ConversationThinkChunkTypedDict, + ) + from .conversationusageinfo import ( + ConversationUsageInfo, + ConversationUsageInfoTypedDict, + ) + from .createagentrequest import ( + CreateAgentRequest, + CreateAgentRequestTool, + CreateAgentRequestToolTypedDict, + CreateAgentRequestTypedDict, + ) + from .createbatchjobrequest import ( + CreateBatchJobRequest, + CreateBatchJobRequestTypedDict, + ) + from .createfileresponse import CreateFileResponse, CreateFileResponseTypedDict + from .createfinetuningjobrequest import ( + CreateFineTuningJobRequest, + CreateFineTuningJobRequestIntegration, + CreateFineTuningJobRequestIntegrationTypedDict, + CreateFineTuningJobRequestRepository, + CreateFineTuningJobRequestRepositoryTypedDict, + CreateFineTuningJobRequestTypedDict, + Hyperparameters, + HyperparametersTypedDict, + ) + from .createlibraryrequest import ( + CreateLibraryRequest, + CreateLibraryRequestTypedDict, + ) + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + ) + from .deletefileresponse import DeleteFileResponse, DeleteFileResponseTypedDict + from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict + from .deltamessage import ( + DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, + DeltaMessageTypedDict, + ) + from .document import Document, DocumentTypedDict + from 
.documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict + from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict + from .embeddingdtype import EmbeddingDtype + from .embeddingrequest import ( + EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, + EmbeddingRequestTypedDict, + ) + from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict + from .embeddingresponsedata import ( + EmbeddingResponseData, + EmbeddingResponseDataTypedDict, + ) + from .encodingformat import EncodingFormat + from .entitytype import EntityType + from .event import Event, EventTypedDict + from .file import File, FileTypedDict + from .filechunk import FileChunk, FileChunkTypedDict + from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + MultiPartBodyParams, + MultiPartBodyParamsTypedDict, + ) + from .fileschema import FileSchema, FileSchemaTypedDict + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from 
.fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .finetuneablemodeltype import FineTuneableModelType + from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, + ) + from .ftclassifierlossfunction import FTClassifierLossFunction + from .ftmodelcard import FTModelCard, FTModelCardTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryConfirmationStatus, + FunctionCallEntryTypedDict, + ) + from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, + ) + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventConfirmationStatus, + FunctionCallEventTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict + from .functiontool import FunctionTool, FunctionToolTypedDict + from .getfileresponse import GetFileResponse, GetFileResponseTypedDict + from .getsignedurlresponse import ( + GetSignedURLResponse, + GetSignedURLResponseTypedDict, + ) + from .githubrepository import GithubRepository, GithubRepositoryTypedDict + from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict + from .imagedetail import ImageDetail + from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, + ) + from .inputentries import InputEntries, InputEntriesTypedDict + from .inputs import Inputs, InputsTypedDict + from .instructrequest 
import ( + InstructRequest, + InstructRequestMessage, + InstructRequestMessageTypedDict, + InstructRequestTypedDict, + ) + from .jobmetadata import JobMetadata, JobMetadataTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, + OrderBy, + ) + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response, + ResponseTypedDict, + UnknownResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + 
JobsAPIRoutesFineTuningGetFineTuningJobsStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict + from .libraries_delete_v1op import ( + LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from .libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + 
LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + DocumentUpload, + DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from .libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .library import Library, LibraryTypedDict + from .listbatchjobsresponse import ( + ListBatchJobsResponse, + ListBatchJobsResponseTypedDict, + ) + from .listdocumentsresponse import ( + ListDocumentsResponse, + ListDocumentsResponseTypedDict, + ) + from .listfilesresponse import ListFilesResponse, ListFilesResponseTypedDict + from .listfinetuningjobsresponse import ( + ListFineTuningJobsResponse, + ListFineTuningJobsResponseData, + ListFineTuningJobsResponseDataTypedDict, + ListFineTuningJobsResponseTypedDict, + UnknownListFineTuningJobsResponseData, + ) + from .listlibrariesresponse import ( + ListLibrariesResponse, + 
ListLibrariesResponseTypedDict, + ) + from .listsharingout import ListSharingOut, ListSharingOutTypedDict + from .messageentries import MessageEntries, MessageEntriesTypedDict + from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, + ) + from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryTypedDict, + Role, + ) + from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, + ) + from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryTypedDict, + ) + from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventTypedDict, + ) + from .metric import Metric, MetricTypedDict + from .mistralpromptmode import MistralPromptMode + from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict + from .modelconversation import ( + ModelConversation, + ModelConversationTool, + ModelConversationToolTypedDict, + ModelConversationTypedDict, + UnknownModelConversationTool, + ) + from .modellist import ( + ModelList, + ModelListData, + ModelListDataTypedDict, + ModelListTypedDict, + UnknownModelListData, + ) + from .moderationobject import ModerationObject, ModerationObjectTypedDict + from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import ( + DocumentUnion, + DocumentUnionTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, 
OCRTableObjectTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .paginationinfo import PaginationInfo, PaginationInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .realtimetranscriptionerror import ( + RealtimeTranscriptionError, + RealtimeTranscriptionErrorTypedDict, + ) + from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailMessage, + RealtimeTranscriptionErrorDetailMessageTypedDict, + RealtimeTranscriptionErrorDetailTypedDict, + ) + from .realtimetranscriptioninputaudioappend import ( + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioAppendTypedDict, + ) + from .realtimetranscriptioninputaudioend import ( + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioEndTypedDict, + ) + from .realtimetranscriptioninputaudioflush import ( + RealtimeTranscriptionInputAudioFlush, + RealtimeTranscriptionInputAudioFlushTypedDict, + ) + from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, + ) + from .realtimetranscriptionsessioncreated import ( + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionCreatedTypedDict, + ) + from .realtimetranscriptionsessionupdated import ( + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdatedTypedDict, + ) + from .realtimetranscriptionsessionupdatemessage import ( + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdateMessageTypedDict, + ) + from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, + ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict + from .requestsource 
import RequestSource + from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict + from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventTypedDict, + ) + from .retrieve_model_v1_models_model_id_getop import ( + ResponseRetrieveModelV1ModelsModelIDGet, + ResponseRetrieveModelV1ModelsModelIDGetTypedDict, + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + UnknownResponseRetrieveModelV1ModelsModelIDGet, + ) + from .sampletype import SampleType + from .security import Security, SecurityTypedDict + from .shareenum import ShareEnum + from .sharingdelete import SharingDelete, SharingDeleteTypedDict + from .sharingin import SharingIn, SharingInTypedDict + from .sharingout import SharingOut, SharingOutTypedDict + from .source import Source + from .ssetypes import SSETypes + from .systemmessage import ( + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ( + ThinkChunk, + ThinkChunkThinking, + ThinkChunkThinkingTypedDict, + ThinkChunkTypedDict, + ) + from .timestampgranularity import TimestampGranularity + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolcallconfirmation import ( + Confirmation, + ToolCallConfirmation, + ToolCallConfirmationTypedDict, + ) + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict + from .toolexecutiondeltaevent import ( + 
ToolExecutionDeltaEvent, + ToolExecutionDeltaEventName, + ToolExecutionDeltaEventNameTypedDict, + ToolExecutionDeltaEventTypedDict, + ) + from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventName, + ToolExecutionDoneEventNameTypedDict, + ToolExecutionDoneEventTypedDict, + ) + from .toolexecutionentry import ( + ToolExecutionEntry, + ToolExecutionEntryName, + ToolExecutionEntryNameTypedDict, + ToolExecutionEntryTypedDict, + ) + from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventName, + ToolExecutionStartedEventNameTypedDict, + ToolExecutionStartedEventTypedDict, + ) + from .toolfilechunk import ( + ToolFileChunk, + ToolFileChunkTool, + ToolFileChunkToolTypedDict, + ToolFileChunkTypedDict, + ) + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageTypedDict, + ) + from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkTool, + ToolReferenceChunkToolTypedDict, + ToolReferenceChunkTypedDict, + ) + from .tooltypes import ToolTypes + from .trainingfile import TrainingFile, TrainingFileTypedDict + from .transcriptionresponse import ( + TranscriptionResponse, + TranscriptionResponseTypedDict, + ) + from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, + ) + from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneTypedDict, + ) + from .transcriptionstreamevents import ( + TranscriptionStreamEvents, + TranscriptionStreamEventsData, + TranscriptionStreamEventsDataTypedDict, + TranscriptionStreamEventsTypedDict, + UnknownTranscriptionStreamEventsData, + ) + from .transcriptionstreameventtypes import TranscriptionStreamEventTypes + from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageTypedDict, + ) + from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + 
TranscriptionStreamSegmentDeltaTypedDict, + ) + from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaTypedDict, + ) + from .unarchivemodelresponse import ( + UnarchiveModelResponse, + UnarchiveModelResponseTypedDict, + ) + from .updateagentrequest import ( + UpdateAgentRequest, + UpdateAgentRequestTool, + UpdateAgentRequestToolTypedDict, + UpdateAgentRequestTypedDict, + ) + from .updatedocumentrequest import ( + Attributes, + AttributesTypedDict, + UpdateDocumentRequest, + UpdateDocumentRequestTypedDict, + ) + from .updatelibraryrequest import ( + UpdateLibraryRequest, + UpdateLibraryRequestTypedDict, + ) + from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict + from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, + ) + from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolTypedDict, + ) + from .websearchtool import WebSearchTool, WebSearchToolTypedDict + +__all__ = [ + "APIEndpoint", + "Agent", + "AgentAliasResponse", + "AgentAliasResponseTypedDict", + "AgentConversation", + "AgentConversationAgentVersion", + "AgentConversationAgentVersionTypedDict", + "AgentConversationTypedDict", + "AgentHandoffDoneEvent", + "AgentHandoffDoneEventTypedDict", + "AgentHandoffEntry", + "AgentHandoffEntryTypedDict", + "AgentHandoffStartedEvent", + "AgentHandoffStartedEventTypedDict", + "AgentTool", + "AgentToolTypedDict", + "AgentTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", + 
"AgentsAPIV1AgentsDeleteAliasRequest", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetAgentVersion", + "AgentsAPIV1AgentsGetAgentVersionTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + "AgentsAPIV1AgentsGetVersionRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsListResponse", + "AgentsAPIV1ConversationsListResponseTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", + "AgentsCompletionRequest", + "AgentsCompletionRequestMessage", + "AgentsCompletionRequestMessageTypedDict", + "AgentsCompletionRequestStop", + 
"AgentsCompletionRequestStopTypedDict", + "AgentsCompletionRequestToolChoice", + "AgentsCompletionRequestToolChoiceTypedDict", + "AgentsCompletionRequestTypedDict", + "AgentsCompletionStreamRequest", + "AgentsCompletionStreamRequestMessage", + "AgentsCompletionStreamRequestMessageTypedDict", + "AgentsCompletionStreamRequestStop", + "AgentsCompletionStreamRequestStopTypedDict", + "AgentsCompletionStreamRequestToolChoice", + "AgentsCompletionStreamRequestToolChoiceTypedDict", + "AgentsCompletionStreamRequestTypedDict", + "ArchiveModelResponse", + "ArchiveModelResponseTypedDict", + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageTypedDict", + "Attributes", + "AttributesTypedDict", + "AudioChunk", + "AudioChunkTypedDict", + "AudioEncoding", + "AudioFormat", + "AudioFormatTypedDict", + "AudioTranscriptionRequest", + "AudioTranscriptionRequestStream", + "AudioTranscriptionRequestStreamTypedDict", + "AudioTranscriptionRequestTypedDict", + "BaseModelCard", + "BaseModelCardTypedDict", + "BatchError", + "BatchErrorTypedDict", + "BatchJob", + "BatchJobStatus", + "BatchJobTypedDict", + "BatchRequest", + "BatchRequestTypedDict", + "BuiltInConnectors", + "ChatClassificationRequest", + "ChatClassificationRequestTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + 
"ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "ChatModerationRequest", + "ChatModerationRequestInputs1", + "ChatModerationRequestInputs1TypedDict", + "ChatModerationRequestInputs2", + "ChatModerationRequestInputs2TypedDict", + "ChatModerationRequestInputs3", + "ChatModerationRequestInputs3TypedDict", + "ChatModerationRequestTypedDict", + "Checkpoint", + "CheckpointTypedDict", + "ClassificationRequest", + "ClassificationRequestInputs", + "ClassificationRequestInputsTypedDict", + "ClassificationRequestTypedDict", + "ClassificationResponse", + "ClassificationResponseTypedDict", + "ClassificationTargetResult", + "ClassificationTargetResultTypedDict", + "ClassifierFineTunedModel", + "ClassifierFineTunedModelTypedDict", + "ClassifierFineTuningJob", + "ClassifierFineTuningJobDetails", + "ClassifierFineTuningJobDetailsIntegration", + "ClassifierFineTuningJobDetailsIntegrationTypedDict", + "ClassifierFineTuningJobDetailsStatus", + "ClassifierFineTuningJobDetailsTypedDict", + "ClassifierFineTuningJobIntegration", + "ClassifierFineTuningJobIntegrationTypedDict", + "ClassifierFineTuningJobStatus", + "ClassifierFineTuningJobTypedDict", + "ClassifierTarget", + "ClassifierTargetResult", + "ClassifierTargetResultTypedDict", + "ClassifierTargetTypedDict", + "ClassifierTrainingParameters", + "ClassifierTrainingParametersTypedDict", + "CodeInterpreterTool", + "CodeInterpreterToolTypedDict", + "CompletionArgs", + "CompletionArgsStop", + "CompletionArgsStopTypedDict", + "CompletionArgsTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionFineTunedModel", + "CompletionFineTunedModelTypedDict", + "CompletionFineTuningJob", + "CompletionFineTuningJobDetails", + "CompletionFineTuningJobDetailsIntegration", + "CompletionFineTuningJobDetailsIntegrationTypedDict", + "CompletionFineTuningJobDetailsRepository", + 
"CompletionFineTuningJobDetailsRepositoryTypedDict", + "CompletionFineTuningJobDetailsStatus", + "CompletionFineTuningJobDetailsTypedDict", + "CompletionFineTuningJobIntegration", + "CompletionFineTuningJobIntegrationTypedDict", + "CompletionFineTuningJobRepository", + "CompletionFineTuningJobRepositoryTypedDict", + "CompletionFineTuningJobStatus", + "CompletionFineTuningJobTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "CompletionTrainingParameters", + "CompletionTrainingParametersTypedDict", + "Confirmation", + "ContentChunk", + "ContentChunkTypedDict", + "ConversationAppendRequest", + "ConversationAppendRequestHandoffExecution", + "ConversationAppendRequestTypedDict", + "ConversationAppendStreamRequest", + "ConversationAppendStreamRequestHandoffExecution", + "ConversationAppendStreamRequestTypedDict", + "ConversationEvents", + "ConversationEventsData", + "ConversationEventsDataTypedDict", + "ConversationEventsTypedDict", + "ConversationHistory", + "ConversationHistoryTypedDict", + "ConversationInputs", + "ConversationInputsTypedDict", + "ConversationMessages", + "ConversationMessagesTypedDict", + "ConversationRequest", + "ConversationRequestAgentVersion", + "ConversationRequestAgentVersionTypedDict", + "ConversationRequestHandoffExecution", + "ConversationRequestTool", + "ConversationRequestToolTypedDict", + "ConversationRequestTypedDict", + "ConversationResponse", + "ConversationResponseTypedDict", + "ConversationRestartRequest", + "ConversationRestartRequestAgentVersion", + "ConversationRestartRequestAgentVersionTypedDict", + "ConversationRestartRequestHandoffExecution", + "ConversationRestartRequestTypedDict", + "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestAgentVersion", + "ConversationRestartStreamRequestAgentVersionTypedDict", + "ConversationRestartStreamRequestHandoffExecution", + "ConversationRestartStreamRequestTypedDict", + 
"ConversationStreamRequest", + "ConversationStreamRequestAgentVersion", + "ConversationStreamRequestAgentVersionTypedDict", + "ConversationStreamRequestHandoffExecution", + "ConversationStreamRequestTool", + "ConversationStreamRequestToolTypedDict", + "ConversationStreamRequestTypedDict", + "ConversationThinkChunk", + "ConversationThinkChunkThinking", + "ConversationThinkChunkThinkingTypedDict", + "ConversationThinkChunkTypedDict", + "ConversationUsageInfo", + "ConversationUsageInfoTypedDict", + "CreateAgentRequest", + "CreateAgentRequestTool", + "CreateAgentRequestToolTypedDict", + "CreateAgentRequestTypedDict", + "CreateBatchJobRequest", + "CreateBatchJobRequestTypedDict", + "CreateFileResponse", + "CreateFileResponseTypedDict", + "CreateFineTuningJobRequest", + "CreateFineTuningJobRequestIntegration", + "CreateFineTuningJobRequestIntegrationTypedDict", + "CreateFineTuningJobRequestRepository", + "CreateFineTuningJobRequestRepositoryTypedDict", + "CreateFineTuningJobRequestTypedDict", + "CreateLibraryRequest", + "CreateLibraryRequestTypedDict", + "DeleteFileResponse", + "DeleteFileResponseTypedDict", + "DeleteModelOut", + "DeleteModelOutTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", + "DeltaMessageTypedDict", + "Document", + "DocumentLibraryTool", + "DocumentLibraryToolTypedDict", + "DocumentTextContent", + "DocumentTextContentTypedDict", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkTypedDict", + "DocumentUnion", + "DocumentUnionTypedDict", + "DocumentUpload", + "DocumentUploadTypedDict", + "EmbeddingDtype", + "EmbeddingRequest", + "EmbeddingRequestInputs", + "EmbeddingRequestInputsTypedDict", + "EmbeddingRequestTypedDict", + "EmbeddingResponse", + "EmbeddingResponseData", + "EmbeddingResponseDataTypedDict", + "EmbeddingResponseTypedDict", + "EncodingFormat", + "EntityType", + "Entry", + "EntryTypedDict", 
+ "Event", + "EventTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FTClassifierLossFunction", + "FTModelCard", + "FTModelCardTypedDict", + "File", + "FileChunk", + "FileChunkTypedDict", + "FilePurpose", + "FileSchema", + "FileSchemaTypedDict", + "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", + "FineTuneableModelType", + "FineTunedModelCapabilities", + "FineTunedModelCapabilitiesTypedDict", + "Format", + "Function", + "FunctionCall", + "FunctionCallEntry", + "FunctionCallEntryArguments", + "FunctionCallEntryArgumentsTypedDict", + "FunctionCallEntryConfirmationStatus", + "FunctionCallEntryTypedDict", + "FunctionCallEvent", + "FunctionCallEventConfirmationStatus", + "FunctionCallEventTypedDict", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionResultEntry", + "FunctionResultEntryTypedDict", + "FunctionTool", + "FunctionToolTypedDict", + "FunctionTypedDict", + "GetFileResponse", + "GetFileResponseTypedDict", + "GetSignedURLResponse", + "GetSignedURLResponseTypedDict", + "GithubRepository", + "GithubRepositoryIn", + "GithubRepositoryInTypedDict", + "GithubRepositoryTypedDict", + "Hyperparameters", + "HyperparametersTypedDict", + "ImageDetail", + "ImageGenerationTool", + "ImageGenerationToolTypedDict", + "ImageURL", + "ImageURLChunk", + 
"ImageURLChunkTypedDict", + "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", + "InputEntries", + "InputEntriesTypedDict", + "Inputs", + "InputsTypedDict", + "InstructRequest", + "InstructRequestMessage", + "InstructRequestMessageTypedDict", + "InstructRequestTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "JobMetadata", + "JobMetadataTypedDict", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + 
"JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "LegacyJobMetadata", + "LegacyJobMetadataTypedDict", + "LibrariesDeleteV1Request", + "LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + "LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1Request", + "LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + "LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "Library", + "LibraryTypedDict", + "ListBatchJobsResponse", + "ListBatchJobsResponseTypedDict", + "ListDocumentsResponse", + "ListDocumentsResponseTypedDict", + "ListFilesResponse", + "ListFilesResponseTypedDict", + "ListFineTuningJobsResponse", + "ListFineTuningJobsResponseData", + "ListFineTuningJobsResponseDataTypedDict", + "ListFineTuningJobsResponseTypedDict", + 
"ListLibrariesResponse", + "ListLibrariesResponseTypedDict", + "ListSharingOut", + "ListSharingOutTypedDict", + "Loc", + "LocTypedDict", + "MessageEntries", + "MessageEntriesTypedDict", + "MessageInputContentChunks", + "MessageInputContentChunksTypedDict", + "MessageInputEntry", + "MessageInputEntryContent", + "MessageInputEntryContentTypedDict", + "MessageInputEntryTypedDict", + "MessageOutputContentChunks", + "MessageOutputContentChunksTypedDict", + "MessageOutputEntry", + "MessageOutputEntryContent", + "MessageOutputEntryContentTypedDict", + "MessageOutputEntryTypedDict", + "MessageOutputEvent", + "MessageOutputEventContent", + "MessageOutputEventContentTypedDict", + "MessageOutputEventTypedDict", + "Metric", + "MetricTypedDict", + "MistralPromptMode", + "ModelCapabilities", + "ModelCapabilitiesTypedDict", + "ModelConversation", + "ModelConversationTool", + "ModelConversationToolTypedDict", + "ModelConversationTypedDict", + "ModelList", + "ModelListData", + "ModelListDataTypedDict", + "ModelListTypedDict", + "ModerationObject", + "ModerationObjectTypedDict", + "ModerationResponse", + "ModerationResponseTypedDict", + "MultiPartBodyParams", + "MultiPartBodyParamsTypedDict", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", + "OrderBy", + "Output", + "OutputContentChunks", + "OutputContentChunksTypedDict", + "OutputTypedDict", + "PaginationInfo", + "PaginationInfoTypedDict", + "Prediction", + "PredictionTypedDict", + "ProcessingStatusOut", + "ProcessingStatusOutTypedDict", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailMessage", + "RealtimeTranscriptionErrorDetailMessageTypedDict", + "RealtimeTranscriptionErrorDetailTypedDict", + 
"RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionInputAudioAppend", + "RealtimeTranscriptionInputAudioAppendTypedDict", + "RealtimeTranscriptionInputAudioEnd", + "RealtimeTranscriptionInputAudioEndTypedDict", + "RealtimeTranscriptionInputAudioFlush", + "RealtimeTranscriptionInputAudioFlushTypedDict", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionCreatedTypedDict", + "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdateMessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict", + "RealtimeTranscriptionSessionUpdatePayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict", + "RealtimeTranscriptionSessionUpdated", + "RealtimeTranscriptionSessionUpdatedTypedDict", + "ReferenceChunk", + "ReferenceChunkTypedDict", + "RequestSource", + "Response", + "ResponseDoneEvent", + "ResponseDoneEventTypedDict", + "ResponseErrorEvent", + "ResponseErrorEventTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "ResponseRetrieveModelV1ModelsModelIDGet", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", + "ResponseStartedEvent", + "ResponseStartedEventTypedDict", + "ResponseTypedDict", + "ResponseV1ConversationsGet", + "ResponseV1ConversationsGetTypedDict", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "Role", + "SSETypes", + "SampleType", + "Security", + "SecurityTypedDict", + "ShareEnum", + "SharingDelete", + "SharingDeleteTypedDict", + "SharingIn", + "SharingInTypedDict", + "SharingOut", + "SharingOutTypedDict", + "Source", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TableFormat", + "TextChunk", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkThinking", + "ThinkChunkThinkingTypedDict", + "ThinkChunkTypedDict", + "TimestampGranularity", + "Tool", 
+ "ToolCall", + "ToolCallConfirmation", + "ToolCallConfirmationTypedDict", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolConfiguration", + "ToolConfigurationTypedDict", + "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventName", + "ToolExecutionDeltaEventNameTypedDict", + "ToolExecutionDeltaEventTypedDict", + "ToolExecutionDoneEvent", + "ToolExecutionDoneEventName", + "ToolExecutionDoneEventNameTypedDict", + "ToolExecutionDoneEventTypedDict", + "ToolExecutionEntry", + "ToolExecutionEntryName", + "ToolExecutionEntryNameTypedDict", + "ToolExecutionEntryTypedDict", + "ToolExecutionStartedEvent", + "ToolExecutionStartedEventName", + "ToolExecutionStartedEventNameTypedDict", + "ToolExecutionStartedEventTypedDict", + "ToolFileChunk", + "ToolFileChunkTool", + "ToolFileChunkToolTypedDict", + "ToolFileChunkTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageTypedDict", + "ToolReferenceChunk", + "ToolReferenceChunkTool", + "ToolReferenceChunkToolTypedDict", + "ToolReferenceChunkTypedDict", + "ToolTypedDict", + "ToolTypes", + "TrainingFile", + "TrainingFileTypedDict", + "TranscriptionResponse", + "TranscriptionResponseTypedDict", + "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkTypedDict", + "TranscriptionStreamDone", + "TranscriptionStreamDoneTypedDict", + "TranscriptionStreamEventTypes", + "TranscriptionStreamEvents", + "TranscriptionStreamEventsData", + "TranscriptionStreamEventsDataTypedDict", + "TranscriptionStreamEventsTypedDict", + "TranscriptionStreamLanguage", + "TranscriptionStreamLanguageTypedDict", + "TranscriptionStreamSegmentDelta", + "TranscriptionStreamSegmentDeltaTypedDict", + "TranscriptionStreamTextDelta", + "TranscriptionStreamTextDeltaTypedDict", + "UnarchiveModelResponse", + "UnarchiveModelResponseTypedDict", + "UnknownAgentTool", + "UnknownClassifierFineTuningJobDetailsIntegration", + "UnknownClassifierFineTuningJobIntegration", + 
"UnknownCompletionFineTuningJobDetailsIntegration", + "UnknownCompletionFineTuningJobDetailsRepository", + "UnknownCompletionFineTuningJobIntegration", + "UnknownCompletionFineTuningJobRepository", + "UnknownContentChunk", + "UnknownConversationEventsData", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "UnknownListFineTuningJobsResponseData", + "UnknownModelConversationTool", + "UnknownModelListData", + "UnknownResponse", + "UnknownResponseRetrieveModelV1ModelsModelIDGet", + "UnknownTranscriptionStreamEventsData", + "UpdateAgentRequest", + "UpdateAgentRequestTool", + "UpdateAgentRequestToolTypedDict", + "UpdateAgentRequestTypedDict", + "UpdateDocumentRequest", + "UpdateDocumentRequestTypedDict", + "UpdateLibraryRequest", + "UpdateLibraryRequestTypedDict", + "UpdateModelRequest", + "UpdateModelRequestTypedDict", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", + "WandbIntegration", + "WandbIntegrationResult", + "WandbIntegrationResultTypedDict", + "WandbIntegrationTypedDict", + "WebSearchPremiumTool", + "WebSearchPremiumToolTypedDict", + "WebSearchTool", + "WebSearchToolTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "Agent": ".agent", + "AgentTool": ".agent", + "AgentToolTypedDict": ".agent", + "AgentTypedDict": ".agent", + "UnknownAgentTool": ".agent", + "AgentAliasResponse": ".agentaliasresponse", + "AgentAliasResponseTypedDict": ".agentaliasresponse", + "AgentConversation": ".agentconversation", + "AgentConversationAgentVersion": ".agentconversation", + "AgentConversationAgentVersionTypedDict": ".agentconversation", + "AgentConversationTypedDict": ".agentconversation", + "AgentHandoffDoneEvent": 
".agenthandoffdoneevent", + "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", + "AgentHandoffEntry": ".agenthandoffentry", + "AgentHandoffEntryTypedDict": ".agenthandoffentry", + "AgentHandoffStartedEvent": ".agenthandoffstartedevent", + "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequest": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + 
"AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": 
".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", + "AgentsCompletionRequest": ".agentscompletionrequest", + "AgentsCompletionRequestMessage": ".agentscompletionrequest", + "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestStop": ".agentscompletionrequest", + "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", + "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessage": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessageTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", + "APIEndpoint": ".apiendpoint", + "ArchiveModelResponse": ".archivemodelresponse", + "ArchiveModelResponseTypedDict": ".archivemodelresponse", + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "AudioChunk": ".audiochunk", + "AudioChunkTypedDict": ".audiochunk", + "AudioEncoding": ".audioencoding", + "AudioFormat": ".audioformat", + "AudioFormatTypedDict": ".audioformat", + "AudioTranscriptionRequest": ".audiotranscriptionrequest", + "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", + "AudioTranscriptionRequestStream": 
".audiotranscriptionrequeststream", + "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", + "BaseModelCard": ".basemodelcard", + "BaseModelCardTypedDict": ".basemodelcard", + "BatchError": ".batcherror", + "BatchErrorTypedDict": ".batcherror", + "BatchJob": ".batchjob", + "BatchJobTypedDict": ".batchjob", + "BatchJobStatus": ".batchjobstatus", + "BatchRequest": ".batchrequest", + "BatchRequestTypedDict": ".batchrequest", + "BuiltInConnectors": ".builtinconnectors", + "ChatClassificationRequest": ".chatclassificationrequest", + "ChatClassificationRequestTypedDict": ".chatclassificationrequest", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": 
".chatcompletionstreamrequest", + "ChatModerationRequest": ".chatmoderationrequest", + "ChatModerationRequestInputs1": ".chatmoderationrequest", + "ChatModerationRequestInputs1TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs2": ".chatmoderationrequest", + "ChatModerationRequestInputs2TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs3": ".chatmoderationrequest", + "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", + "ChatModerationRequestTypedDict": ".chatmoderationrequest", + "Checkpoint": ".checkpoint", + "CheckpointTypedDict": ".checkpoint", + "ClassificationRequest": ".classificationrequest", + "ClassificationRequestInputs": ".classificationrequest", + "ClassificationRequestInputsTypedDict": ".classificationrequest", + "ClassificationRequestTypedDict": ".classificationrequest", + "ClassificationResponse": ".classificationresponse", + "ClassificationResponseTypedDict": ".classificationresponse", + "ClassificationTargetResult": ".classificationtargetresult", + "ClassificationTargetResultTypedDict": ".classificationtargetresult", + "ClassifierFineTunedModel": ".classifierfinetunedmodel", + "ClassifierFineTunedModelTypedDict": ".classifierfinetunedmodel", + "ClassifierFineTuningJob": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegrationTypedDict": ".classifierfinetuningjob", + "ClassifierFineTuningJobStatus": ".classifierfinetuningjob", + "ClassifierFineTuningJobTypedDict": ".classifierfinetuningjob", + "UnknownClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobDetails": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegrationTypedDict": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsStatus": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsTypedDict": 
".classifierfinetuningjobdetails", + "UnknownClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierTarget": ".classifiertarget", + "ClassifierTargetTypedDict": ".classifiertarget", + "ClassifierTargetResult": ".classifiertargetresult", + "ClassifierTargetResultTypedDict": ".classifiertargetresult", + "ClassifierTrainingParameters": ".classifiertrainingparameters", + "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", + "CodeInterpreterTool": ".codeinterpretertool", + "CodeInterpreterToolTypedDict": ".codeinterpretertool", + "CompletionArgs": ".completionargs", + "CompletionArgsTypedDict": ".completionargs", + "CompletionArgsStop": ".completionargsstop", + "CompletionArgsStopTypedDict": ".completionargsstop", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionFineTunedModel": ".completionfinetunedmodel", + "CompletionFineTunedModelTypedDict": ".completionfinetunedmodel", + "CompletionFineTuningJob": ".completionfinetuningjob", + "CompletionFineTuningJobIntegration": ".completionfinetuningjob", + "CompletionFineTuningJobIntegrationTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobRepositoryTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobStatus": ".completionfinetuningjob", + "CompletionFineTuningJobTypedDict": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobIntegration": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobDetails": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegrationTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepository": 
".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepositoryTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsStatus": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsTypedDict": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "CompletionTrainingParameters": ".completiontrainingparameters", + "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", + "ConversationAppendRequest": ".conversationappendrequest", + "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", + "ConversationAppendRequestTypedDict": ".conversationappendrequest", + "ConversationAppendStreamRequest": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", + "ConversationEvents": ".conversationevents", + "ConversationEventsData": ".conversationevents", + "ConversationEventsDataTypedDict": ".conversationevents", + "ConversationEventsTypedDict": ".conversationevents", + "UnknownConversationEventsData": ".conversationevents", + "ConversationHistory": ".conversationhistory", + "ConversationHistoryTypedDict": ".conversationhistory", + "Entry": ".conversationhistory", + "EntryTypedDict": ".conversationhistory", + "ConversationInputs": ".conversationinputs", + "ConversationInputsTypedDict": ".conversationinputs", + "ConversationMessages": 
".conversationmessages", + "ConversationMessagesTypedDict": ".conversationmessages", + "ConversationRequest": ".conversationrequest", + "ConversationRequestAgentVersion": ".conversationrequest", + "ConversationRequestAgentVersionTypedDict": ".conversationrequest", + "ConversationRequestHandoffExecution": ".conversationrequest", + "ConversationRequestTool": ".conversationrequest", + "ConversationRequestToolTypedDict": ".conversationrequest", + "ConversationRequestTypedDict": ".conversationrequest", + "ConversationResponse": ".conversationresponse", + "ConversationResponseTypedDict": ".conversationresponse", + "Output": ".conversationresponse", + "OutputTypedDict": ".conversationresponse", + "ConversationRestartRequest": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", + "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", + "ConversationRestartRequestTypedDict": ".conversationrestartrequest", + "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", + "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", + "ConversationStreamRequestTool": ".conversationstreamrequest", + "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTypedDict": 
".conversationstreamrequest", + "ConversationThinkChunk": ".conversationthinkchunk", + "ConversationThinkChunkThinking": ".conversationthinkchunk", + "ConversationThinkChunkThinkingTypedDict": ".conversationthinkchunk", + "ConversationThinkChunkTypedDict": ".conversationthinkchunk", + "ConversationUsageInfo": ".conversationusageinfo", + "ConversationUsageInfoTypedDict": ".conversationusageinfo", + "CreateAgentRequest": ".createagentrequest", + "CreateAgentRequestTool": ".createagentrequest", + "CreateAgentRequestToolTypedDict": ".createagentrequest", + "CreateAgentRequestTypedDict": ".createagentrequest", + "CreateBatchJobRequest": ".createbatchjobrequest", + "CreateBatchJobRequestTypedDict": ".createbatchjobrequest", + "CreateFileResponse": ".createfileresponse", + "CreateFileResponseTypedDict": ".createfileresponse", + "CreateFineTuningJobRequest": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegration": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegrationTypedDict": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepository": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepositoryTypedDict": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestTypedDict": ".createfinetuningjobrequest", + "Hyperparameters": ".createfinetuningjobrequest", + "HyperparametersTypedDict": ".createfinetuningjobrequest", + "CreateLibraryRequest": ".createlibraryrequest", + "CreateLibraryRequestTypedDict": ".createlibraryrequest", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileResponse": ".deletefileresponse", + "DeleteFileResponseTypedDict": ".deletefileresponse", + "DeleteModelOut": ".deletemodelout", + "DeleteModelOutTypedDict": ".deletemodelout", + "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": 
".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "Document": ".document", + "DocumentTypedDict": ".document", + "DocumentLibraryTool": ".documentlibrarytool", + "DocumentLibraryToolTypedDict": ".documentlibrarytool", + "DocumentTextContent": ".documenttextcontent", + "DocumentTextContentTypedDict": ".documenttextcontent", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "EmbeddingDtype": ".embeddingdtype", + "EmbeddingRequest": ".embeddingrequest", + "EmbeddingRequestInputs": ".embeddingrequest", + "EmbeddingRequestInputsTypedDict": ".embeddingrequest", + "EmbeddingRequestTypedDict": ".embeddingrequest", + "EmbeddingResponse": ".embeddingresponse", + "EmbeddingResponseTypedDict": ".embeddingresponse", + "EmbeddingResponseData": ".embeddingresponsedata", + "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EncodingFormat": ".encodingformat", + "EntityType": ".entitytype", + "Event": ".event", + "EventTypedDict": ".event", + "File": ".file", + "FileTypedDict": ".file", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", + "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "MultiPartBodyParams": 
".files_api_routes_upload_fileop", + "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "FileSchema": ".fileschema", + "FileSchemaTypedDict": ".fileschema", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "FineTuneableModelType": ".finetuneablemodeltype", + "FineTunedModelCapabilities": ".finetunedmodelcapabilities", + "FineTunedModelCapabilitiesTypedDict": ".finetunedmodelcapabilities", + "FTClassifierLossFunction": ".ftclassifierlossfunction", + "FTModelCard": ".ftmodelcard", + "FTModelCardTypedDict": ".ftmodelcard", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionCallEntry": ".functioncallentry", + "FunctionCallEntryConfirmationStatus": ".functioncallentry", + "FunctionCallEntryTypedDict": ".functioncallentry", + "FunctionCallEntryArguments": ".functioncallentryarguments", + "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", + "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventConfirmationStatus": ".functioncallevent", + "FunctionCallEventTypedDict": ".functioncallevent", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "FunctionResultEntry": ".functionresultentry", + "FunctionResultEntryTypedDict": ".functionresultentry", + "FunctionTool": 
".functiontool", + "FunctionToolTypedDict": ".functiontool", + "GetFileResponse": ".getfileresponse", + "GetFileResponseTypedDict": ".getfileresponse", + "GetSignedURLResponse": ".getsignedurlresponse", + "GetSignedURLResponseTypedDict": ".getsignedurlresponse", + "GithubRepository": ".githubrepository", + "GithubRepositoryTypedDict": ".githubrepository", + "GithubRepositoryIn": ".githubrepositoryin", + "GithubRepositoryInTypedDict": ".githubrepositoryin", + "ImageDetail": ".imagedetail", + "ImageGenerationTool": ".imagegenerationtool", + "ImageGenerationToolTypedDict": ".imagegenerationtool", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", + "InputEntries": ".inputentries", + "InputEntriesTypedDict": ".inputentries", + "Inputs": ".inputs", + "InputsTypedDict": ".inputs", + "InstructRequest": ".instructrequest", + "InstructRequestMessage": ".instructrequest", + "InstructRequestMessageTypedDict": ".instructrequest", + "InstructRequestTypedDict": ".instructrequest", + "JobMetadata": ".jobmetadata", + "JobMetadataTypedDict": ".jobmetadata", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "OrderBy": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + 
"JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "UnknownResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + 
"JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "LegacyJobMetadata": ".legacyjobmetadata", + "LegacyJobMetadataTypedDict": ".legacyjobmetadata", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + 
"LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "DocumentUpload": ".libraries_documents_upload_v1op", + "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": 
".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "Library": ".library", + "LibraryTypedDict": ".library", + "ListBatchJobsResponse": ".listbatchjobsresponse", + "ListBatchJobsResponseTypedDict": ".listbatchjobsresponse", + "ListDocumentsResponse": ".listdocumentsresponse", + "ListDocumentsResponseTypedDict": ".listdocumentsresponse", + "ListFilesResponse": ".listfilesresponse", + "ListFilesResponseTypedDict": ".listfilesresponse", + "ListFineTuningJobsResponse": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseDataTypedDict": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseTypedDict": ".listfinetuningjobsresponse", + "UnknownListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListLibrariesResponse": ".listlibrariesresponse", + "ListLibrariesResponseTypedDict": ".listlibrariesresponse", + "ListSharingOut": ".listsharingout", + "ListSharingOutTypedDict": ".listsharingout", + "MessageEntries": ".messageentries", + "MessageEntriesTypedDict": ".messageentries", + "MessageInputContentChunks": ".messageinputcontentchunks", + "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", + "MessageInputEntry": ".messageinputentry", + "MessageInputEntryContent": ".messageinputentry", + "MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryTypedDict": ".messageinputentry", + "Role": ".messageinputentry", + "MessageOutputContentChunks": ".messageoutputcontentchunks", + "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", + "MessageOutputEntry": ".messageoutputentry", + "MessageOutputEntryContent": 
".messageoutputentry", + "MessageOutputEntryContentTypedDict": ".messageoutputentry", + "MessageOutputEntryTypedDict": ".messageoutputentry", + "MessageOutputEvent": ".messageoutputevent", + "MessageOutputEventContent": ".messageoutputevent", + "MessageOutputEventContentTypedDict": ".messageoutputevent", + "MessageOutputEventTypedDict": ".messageoutputevent", + "Metric": ".metric", + "MetricTypedDict": ".metric", + "MistralPromptMode": ".mistralpromptmode", + "ModelCapabilities": ".modelcapabilities", + "ModelCapabilitiesTypedDict": ".modelcapabilities", + "ModelConversation": ".modelconversation", + "ModelConversationTool": ".modelconversation", + "ModelConversationToolTypedDict": ".modelconversation", + "ModelConversationTypedDict": ".modelconversation", + "UnknownModelConversationTool": ".modelconversation", + "ModelList": ".modellist", + "ModelListData": ".modellist", + "ModelListDataTypedDict": ".modellist", + "ModelListTypedDict": ".modellist", + "UnknownModelListData": ".modellist", + "ModerationObject": ".moderationobject", + "ModerationObjectTypedDict": ".moderationobject", + "ModerationResponse": ".moderationresponse", + "ModerationResponseTypedDict": ".moderationresponse", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "DocumentUnion": ".ocrrequest", + "DocumentUnionTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "OutputContentChunks": ".outputcontentchunks", + 
"OutputContentChunksTypedDict": ".outputcontentchunks", + "PaginationInfo": ".paginationinfo", + "PaginationInfoTypedDict": ".paginationinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ProcessingStatusOut": ".processingstatusout", + "ProcessingStatusOutTypedDict": ".processingstatusout", + "RealtimeTranscriptionError": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessage": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessageTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionInputAudioAppend": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioAppendTypedDict": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioEnd": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioEndTypedDict": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioFlush": ".realtimetranscriptioninputaudioflush", + "RealtimeTranscriptionInputAudioFlushTypedDict": ".realtimetranscriptioninputaudioflush", + "RealtimeTranscriptionSession": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdateMessage": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict": ".realtimetranscriptionsessionupdatemessage", + 
"RealtimeTranscriptionSessionUpdatePayload": ".realtimetranscriptionsessionupdatepayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict": ".realtimetranscriptionsessionupdatepayload", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "RequestSource": ".requestsource", + "ResponseDoneEvent": ".responsedoneevent", + "ResponseDoneEventTypedDict": ".responsedoneevent", + "ResponseErrorEvent": ".responseerrorevent", + "ResponseErrorEventTypedDict": ".responseerrorevent", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseStartedEvent": ".responsestartedevent", + "ResponseStartedEventTypedDict": ".responsestartedevent", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "UnknownResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "SampleType": ".sampletype", + "Security": ".security", + "SecurityTypedDict": ".security", + "ShareEnum": ".shareenum", + "SharingDelete": ".sharingdelete", + "SharingDeleteTypedDict": ".sharingdelete", + "SharingIn": ".sharingin", + "SharingInTypedDict": ".sharingin", + "SharingOut": ".sharingout", + "SharingOutTypedDict": ".sharingout", + "Source": ".source", + "SSETypes": ".ssetypes", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": 
".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkThinking": ".thinkchunk", + "ThinkChunkThinkingTypedDict": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "TimestampGranularity": ".timestampgranularity", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "Confirmation": ".toolcallconfirmation", + "ToolCallConfirmation": ".toolcallconfirmation", + "ToolCallConfirmationTypedDict": ".toolcallconfirmation", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolConfiguration": ".toolconfiguration", + "ToolConfigurationTypedDict": ".toolconfiguration", + "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDoneEvent": ".toolexecutiondoneevent", + "ToolExecutionDoneEventName": ".toolexecutiondoneevent", + "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", + "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryName": ".toolexecutionentry", + "ToolExecutionEntryNameTypedDict": ".toolexecutionentry", + "ToolExecutionEntryTypedDict": ".toolexecutionentry", + "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventName": ".toolexecutionstartedevent", + "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", + "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", + "ToolFileChunk": ".toolfilechunk", + "ToolFileChunkTool": ".toolfilechunk", + "ToolFileChunkToolTypedDict": ".toolfilechunk", + "ToolFileChunkTypedDict": ".toolfilechunk", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + 
"ToolMessageTypedDict": ".toolmessage", + "ToolReferenceChunk": ".toolreferencechunk", + "ToolReferenceChunkTool": ".toolreferencechunk", + "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", + "ToolReferenceChunkTypedDict": ".toolreferencechunk", + "ToolTypes": ".tooltypes", + "TrainingFile": ".trainingfile", + "TrainingFileTypedDict": ".trainingfile", + "TranscriptionResponse": ".transcriptionresponse", + "TranscriptionResponseTypedDict": ".transcriptionresponse", + "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", + "TranscriptionStreamDone": ".transcriptionstreamdone", + "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", + "TranscriptionStreamEvents": ".transcriptionstreamevents", + "TranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + "UnknownTranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", + "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", + "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", + "UnarchiveModelResponse": ".unarchivemodelresponse", + "UnarchiveModelResponseTypedDict": ".unarchivemodelresponse", + "UpdateAgentRequest": ".updateagentrequest", + "UpdateAgentRequestTool": ".updateagentrequest", + "UpdateAgentRequestToolTypedDict": ".updateagentrequest", + "UpdateAgentRequestTypedDict": ".updateagentrequest", + "Attributes": ".updatedocumentrequest", + "AttributesTypedDict": 
".updatedocumentrequest", + "UpdateDocumentRequest": ".updatedocumentrequest", + "UpdateDocumentRequestTypedDict": ".updatedocumentrequest", + "UpdateLibraryRequest": ".updatelibraryrequest", + "UpdateLibraryRequestTypedDict": ".updatelibraryrequest", + "UpdateModelRequest": ".updatemodelrequest", + "UpdateModelRequestTypedDict": ".updatemodelrequest", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", + "WandbIntegration": ".wandbintegration", + "WandbIntegrationTypedDict": ".wandbintegration", + "WandbIntegrationResult": ".wandbintegrationresult", + "WandbIntegrationResultTypedDict": ".wandbintegrationresult", + "WebSearchPremiumTool": ".websearchpremiumtool", + "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", + "WebSearchTool": ".websearchtool", + "WebSearchToolTypedDict": ".websearchtool", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py new file mode 100644 index 00000000..686a6eb8 --- /dev/null +++ b/src/mistralai/client/models/agent.py @@ -0,0 +1,191 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1336849c84fb + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from functools import partial +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentToolTypedDict = TypeAliasType( + "AgentToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +class UnknownAgentTool(BaseModel): + r"""A AgentTool variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_AGENT_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + +AgentTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + UnknownAgentTool, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_AGENT_TOOL_VARIANTS, + unknown_cls=UnknownAgentTool, + union_name="AgentTool", + ) + ), +] + + +class AgentTypedDict(TypedDict): + model: str + name: str + id: str + version: int + versions: List[int] + created_at: datetime + updated_at: datetime + deployment_chat: bool + source: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + object: Literal["agent"] + version_message: NotRequired[Nullable[str]] + + +class Agent(BaseModel): + model: str + + name: str + + id: str + + version: int + + versions: List[int] + + created_at: datetime + + updated_at: datetime + + deployment_chat: bool + + source: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentTool]] = None + r"""List of tools which are available to the model during the 
conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["agent"]], AfterValidator(validate_const("agent"))], + pydantic.Field(alias="object"), + ] = "agent" + + version_message: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "object", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + Agent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agentaliasresponse.py b/src/mistralai/client/models/agentaliasresponse.py new file mode 100644 index 00000000..6972af2a --- /dev/null +++ b/src/mistralai/client/models/agentaliasresponse.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3899a98a55dd + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AgentAliasResponseTypedDict(TypedDict): + alias: str + version: int + created_at: datetime + updated_at: datetime + + +class AgentAliasResponse(BaseModel): + alias: str + + version: int + + created_at: datetime + + updated_at: datetime diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py new file mode 100644 index 00000000..da30c663 --- /dev/null +++ b/src/mistralai/client/models/agentconversation.py @@ -0,0 +1,105 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1b7d73eddf51 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentConversationAgentVersionTypedDict = TypeAliasType( + "AgentConversationAgentVersionTypedDict", Union[str, int] +) + + +AgentConversationAgentVersion = TypeAliasType( + "AgentConversationAgentVersion", Union[str, int] +) + + +class AgentConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + agent_id: str + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: Literal["conversation"] + 
agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] + + +class AgentConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + agent_id: str + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" + + agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["name", "description", "metadata", "object", "agent_version"] + ) + nullable_fields = set(["name", "description", "metadata", "agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AgentConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py new file mode 100644 index 00000000..e2609e3d --- /dev/null +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 82628bb5fcea + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentHandoffDoneEventTypedDict(TypedDict): + id: str + next_agent_id: str + next_agent_name: str + type: Literal["agent.handoff.done"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffDoneEvent(BaseModel): + id: str + + next_agent_id: str + + next_agent_name: str + + type: Annotated[ + Annotated[ + Literal["agent.handoff.done"], + AfterValidator(validate_const("agent.handoff.done")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + AgentHandoffDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py new file mode 100644 index 00000000..f92ef2cc --- /dev/null +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -0,0 +1,90 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5030bcaa3a07 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentHandoffEntryTypedDict(TypedDict): + previous_agent_id: str + previous_agent_name: str + next_agent_id: str + next_agent_name: str + object: Literal["entry"] + type: Literal["agent.handoff"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class AgentHandoffEntry(BaseModel): + previous_agent_id: str + + previous_agent_name: str + + next_agent_id: str + + next_agent_name: str + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["agent.handoff"]], + AfterValidator(validate_const("agent.handoff")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AgentHandoffEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py new file mode 100644 index 00000000..2a402341 --- /dev/null +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f6093d9b222 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentHandoffStartedEventTypedDict(TypedDict): + id: str + previous_agent_id: str + previous_agent_name: str + type: Literal["agent.handoff.started"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffStartedEvent(BaseModel): + id: str + + previous_agent_id: str + + previous_agent_name: str + + type: Annotated[ + Annotated[ + Literal["agent.handoff.started"], + AfterValidator(validate_const("agent.handoff.started")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + AgentHandoffStartedEvent.model_rebuild() +except 
NameError: + pass diff --git a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py new file mode 100644 index 00000000..04761ae7 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 23a832f8f175 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + version: int + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py b/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py new file mode 100644 index 00000000..291a9802 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9c9947e768d3 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsDeleteAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + + +class AgentsAPIV1AgentsDeleteAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py new file mode 100644 index 00000000..5e41fdcd --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 95adb6768908 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsDeleteRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py new file mode 100644 index 00000000..941863d0 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ef9914284afb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): + agent_id: str + version: str + + +class AgentsAPIV1AgentsGetVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..dd17580d --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f5918c34f1c7 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] +) + + +AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] +) + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_version: 
Annotated[ + OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["agent_version"]) + nullable_fields = set(["agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py new file mode 100644 index 00000000..bb1da602 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a04815e6c798 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py new file mode 100644 index 00000000..54b62e90 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 19e3310c3907 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): + agent_id: str + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of versions per page""" + + +class AgentsAPIV1AgentsListVersionsRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 + r"""Number of versions per page""" + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page", "page_size"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_listop.py b/src/mistralai/client/models/agents_api_v1_agents_listop.py new file mode 100644 index 00000000..97b1c7f1 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_listop.py @@ -0,0 +1,115 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 25a6460a6e19 + +from __future__ import annotations +from .requestsource import RequestSource +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of agents per page""" + deployment_chat: NotRequired[Nullable[bool]] + sources: NotRequired[Nullable[List[RequestSource]]] + name: NotRequired[Nullable[str]] + r"""Filter by agent name""" + search: NotRequired[Nullable[str]] + r"""Search agents by name or ID""" + id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1AgentsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", 
explode=True)), + ] = 20 + r"""Number of agents per page""" + + deployment_chat: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sources: Annotated[ + OptionalNullable[List[RequestSource]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Filter by agent name""" + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Search agents by name or ID""" + + id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "search", + "id", + "metadata", + ] + ) + nullable_fields = set( + ["deployment_chat", "sources", "name", "search", "id", "metadata"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py similarity index 76% rename from src/mistralai/models/agents_api_v1_agents_update_versionop.py rename to 
src/mistralai/client/models/agents_api_v1_agents_update_versionop.py index 5e4b97b3..5ab821ea 100644 --- a/src/mistralai/models/agents_api_v1_agents_update_versionop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 63f61b8891bf from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/agents_api_v1_agents_updateop.py b/src/mistralai/client/models/agents_api_v1_agents_updateop.py new file mode 100644 index 00000000..69da5001 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_updateop.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bb55993c932d + +from __future__ import annotations +from .updateagentrequest import UpdateAgentRequest, UpdateAgentRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): + agent_id: str + update_agent_request: UpdateAgentRequestTypedDict + + +class AgentsAPIV1AgentsUpdateRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_agent_request: Annotated[ + UpdateAgentRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py similarity index 85% rename from src/mistralai/models/agents_api_v1_conversations_append_streamop.py rename to src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py index d2489ffb..d257dc78 100644 --- a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py @@ -1,12 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ec00e0905f15 from __future__ import annotations from .conversationappendstreamrequest import ( ConversationAppendStreamRequest, ConversationAppendStreamRequestTypedDict, ) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/agents_api_v1_conversations_appendop.py b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py similarity index 84% rename from src/mistralai/models/agents_api_v1_conversations_appendop.py rename to src/mistralai/client/models/agents_api_v1_conversations_appendop.py index ba37697e..61fec083 100644 --- a/src/mistralai/models/agents_api_v1_conversations_appendop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py @@ -1,12 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 39c6125e850c from __future__ import annotations from .conversationappendrequest import ( ConversationAppendRequest, ConversationAppendRequestTypedDict, ) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py new file mode 100644 index 00000000..499645a7 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0792e6abbdcb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py new file mode 100644 index 00000000..504616ab --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c530f2fc64d0 + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsGetRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" + + +ResponseV1ConversationsGetTypedDict = TypeAliasType( + "ResponseV1ConversationsGetTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) +r"""Successful Response""" + + +ResponseV1ConversationsGet = TypeAliasType( + "ResponseV1ConversationsGet", Union[AgentConversation, ModelConversation] +) +r"""Successful Response""" diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py similarity index 79% rename from src/mistralai/models/agents_api_v1_conversations_historyop.py rename to src/mistralai/client/models/agents_api_v1_conversations_historyop.py index b8c33d1b..ef0a4eb0 100644 --- a/src/mistralai/models/agents_api_v1_conversations_historyop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2f5ca33768aa from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py new file mode 100644 index 00000000..8bf66aea --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 936e36181d36 + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1ConversationsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def 
serialize_model(self, handler): + optional_fields = set(["page", "page_size", "metadata"]) + nullable_fields = set(["metadata"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsListResponseTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) + + +AgentsAPIV1ConversationsListResponse = TypeAliasType( + "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] +) diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py similarity index 80% rename from src/mistralai/models/agents_api_v1_conversations_messagesop.py rename to src/mistralai/client/models/agents_api_v1_conversations_messagesop.py index f0dac8bf..19978a19 100644 --- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b5141764a708 from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py similarity index 85% rename from src/mistralai/models/agents_api_v1_conversations_restart_streamop.py rename to src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py index f39b74eb..63c74449 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py @@ -1,12 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c284a1711148 from __future__ import annotations from .conversationrestartstreamrequest import ( ConversationRestartStreamRequest, ConversationRestartStreamRequestTypedDict, ) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py similarity index 85% rename from src/mistralai/models/agents_api_v1_conversations_restartop.py rename to src/mistralai/client/models/agents_api_v1_conversations_restartop.py index f706c066..3186d5df 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restartop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py @@ -1,12 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3ba234e5a8fc from __future__ import annotations from .conversationrestartrequest import ( ConversationRestartRequest, ConversationRestartRequestTypedDict, ) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py new file mode 100644 index 00000000..6955f6ac --- /dev/null +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -0,0 +1,191 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3960bc4c545f + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionRequestStopTypedDict = TypeAliasType( + "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = TypeAliasType( + "AgentsCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionRequestToolChoice = TypeAliasType( + "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionRequestTypedDict(TypedDict): + messages: List[AgentsCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[AgentsCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionRequest(BaseModel): + messages: List[AgentsCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[AgentsCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py new file mode 100644 index 00000000..c2cf3552 --- /dev/null +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -0,0 +1,189 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1b73f90befc2 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestStop = TypeAliasType( + "AgentsCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionStreamRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionStreamRequestToolChoice = TypeAliasType( + "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionStreamRequestTypedDict(TypedDict): + messages: List[AgentsCompletionStreamRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionStreamRequest(BaseModel): + messages: List[AgentsCompletionStreamRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[AgentsCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/apiendpoint.py 
b/src/mistralai/client/models/apiendpoint.py new file mode 100644 index 00000000..a6665c10 --- /dev/null +++ b/src/mistralai/client/models/apiendpoint.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 00b34ce0a24d + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +APIEndpoint = Union[ + Literal[ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/fim/completions", + "/v1/moderations", + "/v1/chat/moderations", + "/v1/ocr", + "/v1/classifications", + "/v1/chat/classifications", + "/v1/conversations", + "/v1/audio/transcriptions", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/archivemodelresponse.py b/src/mistralai/client/models/archivemodelresponse.py new file mode 100644 index 00000000..f1116850 --- /dev/null +++ b/src/mistralai/client/models/archivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d22c644df64 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ArchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class ArchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ArchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py new file mode 100644 index 00000000..26a778c7 --- /dev/null +++ b/src/mistralai/client/models/assistantmessage.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2b49546e0742 + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) + + +class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + + +class AssistantMessage(BaseModel): + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + content: OptionalNullable[AssistantMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py new file mode 100644 index 00000000..68866cd2 --- /dev/null +++ b/src/mistralai/client/models/audiochunk.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ce5dce4dced2 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class AudioChunkTypedDict(TypedDict): + input_audio: str + type: Literal["input_audio"] + + +class AudioChunk(BaseModel): + input_audio: str + + type: Annotated[ + Annotated[ + Literal["input_audio"], AfterValidator(validate_const("input_audio")) + ], + pydantic.Field(alias="type"), + ] = "input_audio" + + +try: + AudioChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audioencoding.py b/src/mistralai/client/models/audioencoding.py new file mode 100644 index 00000000..67fec75d --- /dev/null +++ b/src/mistralai/client/models/audioencoding.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b14e6a50f730 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +AudioEncoding = Union[ + Literal[ + "pcm_s16le", + "pcm_s32le", + "pcm_f16le", + "pcm_f32le", + "pcm_mulaw", + "pcm_alaw", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/audioformat.py b/src/mistralai/client/models/audioformat.py new file mode 100644 index 00000000..fef87ae7 --- /dev/null +++ b/src/mistralai/client/models/audioformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c8655712c218 + +from __future__ import annotations +from .audioencoding import AudioEncoding +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AudioFormatTypedDict(TypedDict): + encoding: AudioEncoding + sample_rate: int + + +class AudioFormat(BaseModel): + encoding: AudioEncoding + + sample_rate: int diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py new file mode 100644 index 00000000..fe4c79e3 --- /dev/null +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4148b4d23e7 + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to be used.""" + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[False] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequest(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + r"""ID of the model to be used.""" + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + stream: Annotated[ + Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = False + + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + ) + nullable_fields = set(["file_url", "file_id", "language", "temperature"]) + serialized = handler(self) + m = {} + + for n, f 
in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AudioTranscriptionRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py new file mode 100644 index 00000000..2d1e9269 --- /dev/null +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -0,0 +1,115 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 33a07317a3b3 + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestStreamTypedDict(TypedDict): + model: str + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[True] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequestStream(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + stream: Annotated[ + Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = True + + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + ) + nullable_fields = set(["file_url", "file_id", "language", "temperature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): 
+ k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + AudioTranscriptionRequestStream.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py new file mode 100644 index 00000000..9c9e9a20 --- /dev/null +++ b/src/mistralai/client/models/basemodelcard.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 556ebdc33276 + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class BaseModelCardTypedDict(TypedDict): + id: str + capabilities: ModelCapabilitiesTypedDict + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: Literal["base"] + + +class BaseModelCard(BaseModel): + id: str + + capabilities: ModelCapabilities + + object: Optional[str] = "model" + + created: 
Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + type: Annotated[ + Annotated[Literal["base"], AfterValidator(validate_const("base"))], + pydantic.Field(alias="type"), + ] = "base" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + BaseModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py new file mode 100644 index 00000000..8a353cd2 --- /dev/null +++ b/src/mistralai/client/models/batcherror.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1563e2a576ec + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchErrorTypedDict(TypedDict): + message: str + count: NotRequired[int] + + +class BatchError(BaseModel): + message: str + + count: Optional[int] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["count"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjob.py b/src/mistralai/client/models/batchjob.py new file mode 100644 index 00000000..80acac33 --- /dev/null +++ b/src/mistralai/client/models/batchjob.py @@ -0,0 +1,138 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85cd28932cc7 + +from __future__ import annotations +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobstatus import BatchJobStatus +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class BatchJobTypedDict(TypedDict): + id: str + input_files: List[str] + endpoint: str + errors: List[BatchErrorTypedDict] + status: BatchJobStatus + created_at: int + total_requests: int + completed_requests: int + succeeded_requests: int + failed_requests: int + object: Literal["batch"] + metadata: NotRequired[Nullable[Dict[str, Any]]] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + output_file: NotRequired[Nullable[str]] + error_file: NotRequired[Nullable[str]] + outputs: NotRequired[Nullable[List[Dict[str, Any]]]] + started_at: NotRequired[Nullable[int]] + completed_at: NotRequired[Nullable[int]] + + +class BatchJob(BaseModel): + id: str + + input_files: List[str] + + endpoint: str + + errors: List[BatchError] + + status: BatchJobStatus + + created_at: int + + total_requests: int + + completed_requests: int + + succeeded_requests: int + + failed_requests: int + + object: Annotated[ + Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))], + pydantic.Field(alias="object"), + ] = "batch" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + output_file: OptionalNullable[str] = UNSET + + error_file: OptionalNullable[str] = UNSET + + outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET + + started_at: OptionalNullable[int] = UNSET + + 
completed_at: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) + nullable_fields = set( + [ + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + BatchJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py new file mode 100644 index 00000000..bd77faa2 --- /dev/null +++ b/src/mistralai/client/models/batchjobstatus.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 61e08cf5eea9 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +BatchJobStatus = Union[ + Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py new file mode 100644 index 00000000..911a9a05 --- /dev/null +++ b/src/mistralai/client/models/batchrequest.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6f36819eeb46 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class BatchRequestTypedDict(TypedDict): + body: Dict[str, Any] + custom_id: NotRequired[Nullable[str]] + + +class BatchRequest(BaseModel): + body: Dict[str, Any] + + custom_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["custom_id"]) + nullable_fields = set(["custom_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py new file mode 100644 index 00000000..ecf60d3c --- /dev/null +++ b/src/mistralai/client/models/builtinconnectors.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d276ce938dc + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +BuiltInConnectors = Union[ + Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py new file mode 100644 index 00000000..cf2aa78a --- /dev/null +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: afd9cdc71834 + +from __future__ import annotations +from .inputs import Inputs, InputsTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ChatClassificationRequestTypedDict(TypedDict): + model: str + input: InputsTypedDict + r"""Chat to classify""" + + +class ChatClassificationRequest(BaseModel): + model: str + + input: Inputs + r"""Chat to classify""" diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..2c515f6e --- /dev/null +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7e6a512f6a04 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +ChatCompletionChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: ChatCompletionChoiceFinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: ChatCompletionChoiceFinishReason diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..e871bd92 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -0,0 +1,225 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9979805d8c38 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatcompletionresponse.py b/src/mistralai/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..7092bbc1 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionresponse.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 669d996b8e82 + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..b7b2bff1 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 18cb2b2415d4 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessageTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionStreamRequestMessage] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py new file mode 100644 index 00000000..228e7d26 --- /dev/null +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 057aecb07275 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ChatModerationRequestInputs2TypedDict = TypeAliasType( + "ChatModerationRequestInputs2TypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatModerationRequestInputs2 = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputs1TypedDict = TypeAliasType( + "ChatModerationRequestInputs1TypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatModerationRequestInputs1 = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputs3TypedDict = TypeAliasType( + "ChatModerationRequestInputs3TypedDict", + Union[ + List[ChatModerationRequestInputs1TypedDict], + List[List[ChatModerationRequestInputs2TypedDict]], + ], +) +r"""Chat to classify""" + + +ChatModerationRequestInputs3 = TypeAliasType( + "ChatModerationRequestInputs3", + 
Union[List[ChatModerationRequestInputs1], List[List[ChatModerationRequestInputs2]]], +) +r"""Chat to classify""" + + +class ChatModerationRequestTypedDict(TypedDict): + inputs: ChatModerationRequestInputs3TypedDict + r"""Chat to classify""" + model: str + + +class ChatModerationRequest(BaseModel): + inputs: Annotated[ChatModerationRequestInputs3, pydantic.Field(alias="input")] + r"""Chat to classify""" + + model: str + + +try: + ChatModerationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/checkpoint.py b/src/mistralai/client/models/checkpoint.py new file mode 100644 index 00000000..c24e433e --- /dev/null +++ b/src/mistralai/client/models/checkpoint.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1a530d3674d8 + +from __future__ import annotations +from .metric import Metric, MetricTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CheckpointTypedDict(TypedDict): + metrics: MetricTypedDict + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + + +class Checkpoint(BaseModel): + metrics: Metric + r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + step_number: int + r"""The step number that the checkpoint was created at.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py new file mode 100644 index 00000000..25b69413 --- /dev/null +++ b/src/mistralai/client/models/classificationrequest.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6942fe3de24a + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ClassificationRequestInputsTypedDict = TypeAliasType( + "ClassificationRequestInputsTypedDict", Union[str, List[str]] +) +r"""Text to classify.""" + + +ClassificationRequestInputs = TypeAliasType( + "ClassificationRequestInputs", Union[str, List[str]] +) +r"""Text to classify.""" + + +class ClassificationRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" + inputs: ClassificationRequestInputsTypedDict + r"""Text to classify.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class ClassificationRequest(BaseModel): + model: str + r"""ID of the model to use.""" + + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Text to classify.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["metadata"]) + nullable_fields = set(["metadata"]) + serialized = handler(self) + m = {} + + for n, f 
in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassificationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/client/models/classificationresponse.py similarity index 88% rename from src/mistralai/models/classificationresponse.py rename to src/mistralai/client/models/classificationresponse.py index b7741f37..d2f09f43 100644 --- a/src/mistralai/models/classificationresponse.py +++ b/src/mistralai/client/models/classificationresponse.py @@ -1,11 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eaf279db1109 from __future__ import annotations from .classificationtargetresult import ( ClassificationTargetResult, ClassificationTargetResultTypedDict, ) -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing import Dict, List from typing_extensions import TypedDict diff --git a/src/mistralai/models/classificationtargetresult.py b/src/mistralai/client/models/classificationtargetresult.py similarity index 81% rename from src/mistralai/models/classificationtargetresult.py rename to src/mistralai/client/models/classificationtargetresult.py index 60c5a51b..6c7d6231 100644 --- a/src/mistralai/models/classificationtargetresult.py +++ b/src/mistralai/client/models/classificationtargetresult.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2445f12b2a57 from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing import Dict from typing_extensions import TypedDict diff --git a/src/mistralai/client/models/classifierfinetunedmodel.py b/src/mistralai/client/models/classifierfinetunedmodel.py new file mode 100644 index 00000000..fbcf5892 --- /dev/null +++ b/src/mistralai/client/models/classifierfinetunedmodel.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5a9a7a0153c8 + +from __future__ import annotations +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ClassifierFineTunedModelTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FineTunedModelCapabilitiesTypedDict + job: str + classifier_targets: List[ClassifierTargetResultTypedDict] + object: Literal["model"] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: Literal["classifier"] + + +class ClassifierFineTunedModel(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: 
FineTunedModelCapabilities + + job: str + + classifier_targets: List[ClassifierTargetResult] + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="model_type"), + ] = "classifier" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierfinetuningjob.py b/src/mistralai/client/models/classifierfinetuningjob.py new file mode 100644 index 00000000..fb160cf8 --- /dev/null +++ b/src/mistralai/client/models/classifierfinetuningjob.py @@ -0,0 +1,201 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a244d5f2afc5 + +from __future__ import annotations +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierFineTuningJobStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] +r"""The current status of the fine-tuning job.""" + + +ClassifierFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobIntegration(BaseModel): + r"""A ClassifierFineTuningJobIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CLASSIFIER_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +ClassifierFineTuningJobIntegration = WandbIntegrationResult + + +class ClassifierFineTuningJobTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + status: ClassifierFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: ClassifierTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: Literal["job"] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobIntegrationTypedDict]] + ] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["classifier"] + r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierFineTuningJob(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + + status: ClassifierFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: ClassifierTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[ClassifierFineTuningJobIntegration]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" + r"""The type of job (`FT` for fine-tuning).""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierfinetuningjobdetails.py b/src/mistralai/client/models/classifierfinetuningjobdetails.py new file mode 100644 index 00000000..5d73f55e --- /dev/null +++ b/src/mistralai/client/models/classifierfinetuningjobdetails.py @@ -0,0 +1,197 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 75c5dee8df2e + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +ClassifierFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobDetailsIntegration(BaseModel): + r"""A ClassifierFineTuningJobDetailsIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CLASSIFIER_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +ClassifierFineTuningJobDetailsIntegration = WandbIntegrationResult + + +class ClassifierFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: ClassifierFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetResultTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["classifier"] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class ClassifierFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: ClassifierFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetResult] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifiertarget.py b/src/mistralai/client/models/classifiertarget.py new file mode 100644 index 00000000..4d66d789 --- /dev/null +++ b/src/mistralai/client/models/classifiertarget.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2177d51d9dcf + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTargetTypedDict(TypedDict): + name: str + labels: List[str] + weight: NotRequired[float] + loss_function: NotRequired[Nullable[FTClassifierLossFunction]] + + +class ClassifierTarget(BaseModel): + name: str + + labels: List[str] + + weight: Optional[float] = 1 + + loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["weight", "loss_function"]) + nullable_fields = set(["loss_function"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertargetresult.py b/src/mistralai/client/models/classifiertargetresult.py new file mode 100644 index 00000000..8ce7c0ca --- /dev/null +++ b/src/mistralai/client/models/classifiertargetresult.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 19c343844888 + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ClassifierTargetResultTypedDict(TypedDict): + name: str + labels: List[str] + weight: float + loss_function: FTClassifierLossFunction + + +class ClassifierTargetResult(BaseModel): + name: str + + labels: List[str] + + weight: float + + loss_function: FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py new file mode 100644 index 00000000..14fa4926 --- /dev/null +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -0,0 +1,73 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4000b05e3b8d + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + 
"training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + ) + nullable_fields = set( + ["training_steps", "weight_decay", "warmup_fraction", "epochs", "seq_len"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py new file mode 100644 index 00000000..ce14265f --- /dev/null +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 950cd8f4ad49 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class CodeInterpreterToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["code_interpreter"] + + +class CodeInterpreterTool(BaseModel): + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["code_interpreter"], + AfterValidator(validate_const("code_interpreter")), + ], + pydantic.Field(alias="type"), + ] = "code_interpreter" + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CodeInterpreterTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py new file mode 100644 index 00000000..ab5cf5ff --- /dev/null +++ b/src/mistralai/client/models/completionargs.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3db008bcddca + +from __future__ import annotations +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .toolchoiceenum import ToolChoiceEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionArgsTypedDict(TypedDict): + r"""White-listed arguments from the completion API""" + + stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] + presence_penalty: NotRequired[Nullable[float]] + frequency_penalty: NotRequired[Nullable[float]] + temperature: NotRequired[Nullable[float]] + top_p: NotRequired[Nullable[float]] + max_tokens: NotRequired[Nullable[int]] + random_seed: NotRequired[Nullable[int]] + prediction: NotRequired[Nullable[PredictionTypedDict]] + response_format: NotRequired[Nullable[ResponseFormatTypedDict]] + tool_choice: NotRequired[ToolChoiceEnum] + + +class CompletionArgs(BaseModel): + r"""White-listed arguments from the completion API""" + + stop: OptionalNullable[CompletionArgsStop] = UNSET + + presence_penalty: OptionalNullable[float] = UNSET + + frequency_penalty: OptionalNullable[float] = UNSET + + temperature: OptionalNullable[float] = UNSET + + top_p: OptionalNullable[float] = UNSET + + max_tokens: OptionalNullable[int] = UNSET + + random_seed: OptionalNullable[int] = UNSET + + prediction: OptionalNullable[Prediction] = UNSET + + response_format: OptionalNullable[ResponseFormat] = UNSET + + tool_choice: Optional[ToolChoiceEnum] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + 
"prediction", + "response_format", + "tool_choice", + ] + ) + nullable_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/models/completionargsstop.py b/src/mistralai/client/models/completionargsstop.py similarity index 92% rename from src/mistralai/models/completionargsstop.py rename to src/mistralai/client/models/completionargsstop.py index de7a0956..39c858e6 100644 --- a/src/mistralai/models/completionargsstop.py +++ b/src/mistralai/client/models/completionargsstop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f339214501d from __future__ import annotations from typing import List, Union diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py new file mode 100644 index 00000000..5fd6c173 --- /dev/null +++ b/src/mistralai/client/models/completionchunk.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d786b44926f4 + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionevent.py b/src/mistralai/client/models/completionevent.py new file mode 100644 index 00000000..3b90ab0c --- /dev/null +++ b/src/mistralai/client/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c68817e7e190 + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/src/mistralai/client/models/completionfinetunedmodel.py b/src/mistralai/client/models/completionfinetunedmodel.py new file mode 100644 index 00000000..54a1c165 --- /dev/null +++ b/src/mistralai/client/models/completionfinetunedmodel.py @@ -0,0 +1,110 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f08c10d149f5 + +from __future__ import annotations +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class CompletionFineTunedModelTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FineTunedModelCapabilitiesTypedDict + job: str + object: Literal["model"] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: Literal["completion"] + + +class CompletionFineTunedModel(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: 
FineTunedModelCapabilities + + job: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="model_type"), + ] = "completion" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionfinetuningjob.py b/src/mistralai/client/models/completionfinetuningjob.py new file mode 100644 index 00000000..1bf0a730 --- /dev/null +++ b/src/mistralai/client/models/completionfinetuningjob.py @@ -0,0 +1,227 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c242237efe9b + +from __future__ import annotations +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionFineTuningJobStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] +r"""The current status of the fine-tuning job.""" + + +CompletionFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobIntegration(BaseModel): + r"""A CompletionFineTuningJobIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobIntegration = WandbIntegrationResult + + +CompletionFineTuningJobRepositoryTypedDict = GithubRepositoryTypedDict + + +class UnknownCompletionFineTuningJobRepository(BaseModel): + r"""A CompletionFineTuningJobRepository variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobRepository = GithubRepository + + +class CompletionFineTuningJobTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + status: CompletionFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: Literal["job"] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobIntegrationTypedDict]] + ] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["completion"] + r"""The type of job (`FT` for fine-tuning).""" + repositories: NotRequired[List[CompletionFineTuningJobRepositoryTypedDict]] + + +class CompletionFineTuningJob(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + + status: CompletionFineTuningJobStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[CompletionFineTuningJobIntegration]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" + r"""The type of job (`FT` for fine-tuning).""" + + repositories: Optional[List[CompletionFineTuningJobRepository]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionfinetuningjobdetails.py b/src/mistralai/client/models/completionfinetuningjobdetails.py new file mode 100644 index 00000000..cb787021 --- /dev/null +++ b/src/mistralai/client/models/completionfinetuningjobdetails.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e8379265af48 + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +CompletionFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobDetailsIntegration(BaseModel): + r"""A CompletionFineTuningJobDetailsIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobDetailsIntegration = WandbIntegrationResult + + +CompletionFineTuningJobDetailsRepositoryTypedDict = GithubRepositoryTypedDict + + +class UnknownCompletionFineTuningJobDetailsRepository(BaseModel): + r"""A CompletionFineTuningJobDetailsRepository variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobDetailsRepository = GithubRepository + + +class CompletionFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: CompletionFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["completion"] + repositories: NotRequired[List[CompletionFineTuningJobDetailsRepositoryTypedDict]] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class CompletionFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: CompletionFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[CompletionFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" + + repositories: Optional[List[CompletionFineTuningJobDetailsRepository]] = None + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..a52ae892 --- /dev/null +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5969a6bc07f3 + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py new file mode 100644 index 00000000..ca50a7ad --- /dev/null +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: be202ea0d5a6 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] + + +class CompletionTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) + nullable_fields = set( + [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py new file mode 100644 index 00000000..e3de7591 
--- /dev/null +++ b/src/mistralai/client/models/contentchunk.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c007f5ee0325 + +from __future__ import annotations +from .audiochunk import AudioChunk, AudioChunkTypedDict +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + ReferenceChunkTypedDict, + FileChunkTypedDict, + AudioChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ], +) + + +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "document_url": DocumentURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, + "file": FileChunk, + "thinking": ThinkChunk, + "input_audio": AudioChunk, +} + + +ContentChunk = Annotated[ + Union[ + ImageURLChunk, + DocumentURLChunk, + TextChunk, + ReferenceChunk, + FileChunk, + ThinkChunk, + AudioChunk, + UnknownContentChunk, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), +] diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py new file mode 100644 index 00000000..386714fd --- /dev/null +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81ce529e0865 + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendRequestTypedDict(TypedDict): + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] + + +class ConversationAppendRequest(BaseModel): + inputs: Optional[ConversationInputs] = None + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): 
+ k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py new file mode 100644 index 00000000..32f6b148 --- /dev/null +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 27ada745e6ad + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendStreamRequestTypedDict(TypedDict): + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] + + +class ConversationAppendStreamRequest(BaseModel): + inputs: Optional[ConversationInputs] = 
None + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py new file mode 100644 index 00000000..17812983 --- /dev/null +++ b/src/mistralai/client/models/conversationevents.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8c8b08d853f6 + +from __future__ import annotations +from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, +) +from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict +from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict +from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict +from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict +from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict +from .ssetypes import SSETypes +from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventTypedDict, +) +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ConversationEventsDataTypedDict = TypeAliasType( + "ConversationEventsDataTypedDict", + Union[ + ResponseStartedEventTypedDict, + ResponseDoneEventTypedDict, + ResponseErrorEventTypedDict, + ToolExecutionDeltaEventTypedDict, + ToolExecutionDoneEventTypedDict, + AgentHandoffStartedEventTypedDict, + AgentHandoffDoneEventTypedDict, + ToolExecutionStartedEventTypedDict, + MessageOutputEventTypedDict, + FunctionCallEventTypedDict, + ], +) + + +class UnknownConversationEventsData(BaseModel): + r"""A ConversationEventsData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONVERSATION_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "agent.handoff.done": AgentHandoffDoneEvent, + "agent.handoff.started": AgentHandoffStartedEvent, + "conversation.response.done": ResponseDoneEvent, + "conversation.response.error": ResponseErrorEvent, + "conversation.response.started": ResponseStartedEvent, + "function.call.delta": FunctionCallEvent, + "message.output.delta": MessageOutputEvent, + "tool.execution.delta": ToolExecutionDeltaEvent, + "tool.execution.done": ToolExecutionDoneEvent, + "tool.execution.started": ToolExecutionStartedEvent, +} + + +ConversationEventsData = Annotated[ + Union[ + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDeltaEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, + UnknownConversationEventsData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONVERSATION_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownConversationEventsData, + union_name="ConversationEventsData", + ) + ), +] + + +class ConversationEventsTypedDict(TypedDict): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + data: ConversationEventsDataTypedDict + + +class ConversationEvents(BaseModel): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + + data: ConversationEventsData diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py new file mode 100644 index 00000000..ceef115b --- /dev/null +++ b/src/mistralai/client/models/conversationhistory.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60a51ff1682b + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +EntryTypedDict = TypeAliasType( + "EntryTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + ], +) + + +Entry = TypeAliasType( + "Entry", + Union[ + FunctionResultEntry, + MessageInputEntry, + MessageOutputEntry, + AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, + ], +) + + +class ConversationHistoryTypedDict(TypedDict): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + entries: List[EntryTypedDict] + object: Literal["conversation.history"] + + +class ConversationHistory(BaseModel): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + + entries: List[Entry] + + object: Annotated[ + Annotated[ + Optional[Literal["conversation.history"]], + AfterValidator(validate_const("conversation.history")), + ], + pydantic.Field(alias="object"), + ] = "conversation.history" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = 
set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationHistory.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models/conversationinputs.py b/src/mistralai/client/models/conversationinputs.py similarity index 93% rename from src/mistralai/models/conversationinputs.py rename to src/mistralai/client/models/conversationinputs.py index 4d30cd76..7ce3ffc3 100644 --- a/src/mistralai/models/conversationinputs.py +++ b/src/mistralai/client/models/conversationinputs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 711b769f2c40 from __future__ import annotations from .inputentries import InputEntries, InputEntriesTypedDict diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py new file mode 100644 index 00000000..84664b62 --- /dev/null +++ b/src/mistralai/client/models/conversationmessages.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 011c39501c26 + +from __future__ import annotations +from .messageentries import MessageEntries, MessageEntriesTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ConversationMessagesTypedDict(TypedDict): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + messages: List[MessageEntriesTypedDict] + object: Literal["conversation.messages"] + + +class ConversationMessages(BaseModel): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + + messages: List[MessageEntries] + + object: Annotated[ + Annotated[ + Optional[Literal["conversation.messages"]], + AfterValidator(validate_const("conversation.messages")), + ], + pydantic.Field(alias="object"), + ] = "conversation.messages" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationMessages.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py new file mode 100644 index 00000000..83d599eb --- /dev/null +++ b/src/mistralai/client/models/conversationrequest.py @@ -0,0 +1,163 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58e3ae67f149 + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRequestToolTypedDict = TypeAliasType( + "ConversationRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +ConversationRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationRequestAgentVersion = TypeAliasType( + "ConversationRequestAgentVersion", Union[str, int] +) + + +class ConversationRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + 
handoff_execution: NotRequired[Nullable[ConversationRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ConversationRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[ConversationRequestAgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationRequestHandoffExecution] = UNSET + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[ConversationRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[ConversationRequestAgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py new file mode 100644 index 00000000..f6c10969 --- /dev/null +++ b/src/mistralai/client/models/conversationresponse.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ad7a8472c7bf + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +OutputTypedDict = TypeAliasType( + "OutputTypedDict", + Union[ + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + ], +) + + +Output = TypeAliasType( + "Output", + Union[MessageOutputEntry, AgentHandoffEntry, ToolExecutionEntry, FunctionCallEntry], +) + + +class ConversationResponseTypedDict(TypedDict): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + outputs: 
List[OutputTypedDict] + usage: ConversationUsageInfoTypedDict + object: Literal["conversation.response"] + + +class ConversationResponse(BaseModel): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + + outputs: List[Output] + + usage: ConversationUsageInfo + + object: Annotated[ + Annotated[ + Optional[Literal["conversation.response"]], + AfterValidator(validate_const("conversation.response")), + ], + pydantic.Field(alias="object"), + ] = "conversation.response" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py new file mode 100644 index 00000000..7ae16aff --- /dev/null +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -0,0 +1,112 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 681d90d50514 + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartRequestAgentVersion = TypeAliasType( + "ConversationRestartRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + + inputs: Optional[ConversationInputs] = None + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py new file mode 100644 index 00000000..0e247261 --- /dev/null +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 521c2b5bfb2b + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartStreamRequestAgentVersion = TypeAliasType( + "ConversationRestartStreamRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartStreamRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartStreamRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + from_entry_id: str + + inputs: Optional[ConversationInputs] = None + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( + UNSET + ) + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py new file mode 100644 index 00000000..a20dccae --- /dev/null +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -0,0 +1,165 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58d633507527 + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationStreamRequestToolTypedDict = TypeAliasType( + "ConversationStreamRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationStreamRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationStreamRequestAgentVersion = TypeAliasType( + "ConversationStreamRequestAgentVersion", Union[str, int] +) + + +class ConversationStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] 
+ store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ConversationStreamRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( + UNSET + ) + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[ConversationStreamRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + serialized = 
handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationthinkchunk.py b/src/mistralai/client/models/conversationthinkchunk.py new file mode 100644 index 00000000..e0e172e3 --- /dev/null +++ b/src/mistralai/client/models/conversationthinkchunk.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77e59cde5c0f + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationThinkChunkThinkingTypedDict = TypeAliasType( + "ConversationThinkChunkThinkingTypedDict", + Union[TextChunkTypedDict, ToolReferenceChunkTypedDict], +) + + +ConversationThinkChunkThinking = TypeAliasType( + "ConversationThinkChunkThinking", Union[TextChunk, ToolReferenceChunk] +) + + +class ConversationThinkChunkTypedDict(TypedDict): + thinking: List[ConversationThinkChunkThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + + +class ConversationThinkChunk(BaseModel): + thinking: List[ConversationThinkChunkThinking] + + type: Annotated[ + Annotated[ + Optional[Literal["thinking"]], 
AfterValidator(validate_const("thinking")) + ], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py new file mode 100644 index 00000000..1e80f89e --- /dev/null +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6685e3b50b50 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConversationUsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + connector_tokens: NotRequired[Nullable[int]] + connectors: NotRequired[Nullable[Dict[str, int]]] + + +class ConversationUsageInfo(BaseModel): + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + connector_tokens: OptionalNullable[int] = UNSET + + connectors: OptionalNullable[Dict[str, int]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + ) + nullable_fields = 
set(["connector_tokens", "connectors"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createagentrequest.py b/src/mistralai/client/models/createagentrequest.py new file mode 100644 index 00000000..54b09880 --- /dev/null +++ b/src/mistralai/client/models/createagentrequest.py @@ -0,0 +1,122 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 442629bd914b + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +CreateAgentRequestToolTypedDict = TypeAliasType( + "CreateAgentRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + 
DocumentLibraryToolTypedDict, + ], +) + + +CreateAgentRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +class CreateAgentRequestTypedDict(TypedDict): + model: str + name: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[CreateAgentRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] + + +class CreateAgentRequest(BaseModel): + model: str + + name: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[CreateAgentRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + version_message: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createbatchjobrequest.py b/src/mistralai/client/models/createbatchjobrequest.py new file mode 100644 index 00000000..9a901fef --- /dev/null +++ b/src/mistralai/client/models/createbatchjobrequest.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 56e24cd24e98 + +from __future__ import annotations +from .apiendpoint import APIEndpoint +from .batchrequest import BatchRequest, BatchRequestTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CreateBatchJobRequestTypedDict(TypedDict): + endpoint: APIEndpoint + input_files: NotRequired[Nullable[List[str]]] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] + model: NotRequired[Nullable[str]] + r"""The model to be used for batch inference.""" + agent_id: NotRequired[Nullable[str]] + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + metadata: NotRequired[Nullable[Dict[str, str]]] + r"""The metadata of your choice to be associated with the batch inference job.""" + timeout_hours: NotRequired[int] + r"""The timeout in hours for the batch inference job.""" + + +class CreateBatchJobRequest(BaseModel): + endpoint: APIEndpoint + + input_files: OptionalNullable[List[str]] = UNSET + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + + requests: OptionalNullable[List[BatchRequest]] = UNSET + + model: OptionalNullable[str] = UNSET + r"""The model to be used for batch inference.""" + + agent_id: OptionalNullable[str] = UNSET + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + + metadata: OptionalNullable[Dict[str, str]] = UNSET + r"""The metadata of your choice to be associated with the batch inference job.""" + + timeout_hours: Optional[int] = 24 + r"""The timeout in hours for the batch inference job.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "input_files", + "requests", + "model", + "agent_id", + "metadata", + "timeout_hours", + ] + ) + nullable_fields = set( + ["input_files", "requests", "model", "agent_id", "metadata"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createfileresponse.py b/src/mistralai/client/models/createfileresponse.py new file mode 100644 index 00000000..76821280 --- /dev/null +++ b/src/mistralai/client/models/createfileresponse.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fea5e4832dcc + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class CreateFileResponseTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class CreateFileResponse(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CreateFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/createfinetuningjobrequest.py b/src/mistralai/client/models/createfinetuningjobrequest.py new file mode 100644 index 00000000..e328d944 --- /dev/null +++ b/src/mistralai/client/models/createfinetuningjobrequest.py @@ -0,0 +1,146 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c60d2a45d66b + +from __future__ import annotations +from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .finetuneablemodeltype import FineTuneableModelType +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +CreateFineTuningJobRequestIntegrationTypedDict = WandbIntegrationTypedDict + + +CreateFineTuningJobRequestIntegration = WandbIntegration + + +HyperparametersTypedDict = TypeAliasType( + "HyperparametersTypedDict", + Union[ClassifierTrainingParametersTypedDict, CompletionTrainingParametersTypedDict], +) + + +Hyperparameters = TypeAliasType( + "Hyperparameters", 
Union[ClassifierTrainingParameters, CompletionTrainingParameters] +) + + +CreateFineTuningJobRequestRepositoryTypedDict = GithubRepositoryInTypedDict + + +CreateFineTuningJobRequestRepository = GithubRepositoryIn + + +class CreateFineTuningJobRequestTypedDict(TypedDict): + model: str + hyperparameters: HyperparametersTypedDict + training_files: NotRequired[List[TrainingFileTypedDict]] + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: NotRequired[Nullable[str]] + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: NotRequired[ + Nullable[List[CreateFineTuningJobRequestIntegrationTypedDict]] + ] + r"""A list of integrations to enable for your fine-tuning job.""" + auto_start: NotRequired[bool] + r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: NotRequired[float] + job_type: NotRequired[Nullable[FineTuneableModelType]] + repositories: NotRequired[ + Nullable[List[CreateFineTuningJobRequestRepositoryTypedDict]] + ] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetTypedDict]]] + + +class CreateFineTuningJobRequest(BaseModel): + model: str + + hyperparameters: Hyperparameters + + training_files: Optional[List[TrainingFile]] = None + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. 
These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + + suffix: OptionalNullable[str] = UNSET + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + + integrations: OptionalNullable[List[CreateFineTuningJobRequestIntegration]] = UNSET + r"""A list of integrations to enable for your fine-tuning job.""" + + auto_start: Optional[bool] = None + r"""This field will be required in a future release.""" + + invalid_sample_skip_percentage: Optional[float] = 0 + + job_type: OptionalNullable[FineTuneableModelType] = UNSET + + repositories: OptionalNullable[List[CreateFineTuningJobRequestRepository]] = UNSET + + classifier_targets: OptionalNullable[List[ClassifierTarget]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "training_files", + "validation_files", + "suffix", + "integrations", + "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + ) + nullable_fields = set( + [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createlibraryrequest.py b/src/mistralai/client/models/createlibraryrequest.py new file mode 100644 index 00000000..58874e01 --- /dev/null +++ 
b/src/mistralai/client/models/createlibraryrequest.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1c489bec2f53 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class CreateLibraryRequestTypedDict(TypedDict): + name: str + description: NotRequired[Nullable[str]] + chunk_size: NotRequired[Nullable[int]] + + +class CreateLibraryRequest(BaseModel): + name: str + + description: OptionalNullable[str] = UNSET + + chunk_size: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "chunk_size"]) + nullable_fields = set(["description", "chunk_size"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py similarity index 77% rename from src/mistralai/models/delete_model_v1_models_model_id_deleteop.py rename to src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py index 4acb8d53..199614f5 100644 --- a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 767aba526e43 from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/deletefileresponse.py b/src/mistralai/client/models/deletefileresponse.py new file mode 100644 index 00000000..ffd0e0d0 --- /dev/null +++ b/src/mistralai/client/models/deletefileresponse.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3ee464763a32 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DeleteFileResponseTypedDict(TypedDict): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + + +class DeleteFileResponse(BaseModel): + id: str + r"""The ID of the deleted file.""" + + object: str + r"""The object type that was deleted""" + + deleted: bool + r"""The deletion status.""" diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py new file mode 100644 index 00000000..fa0c20a4 --- /dev/null +++ b/src/mistralai/client/models/deletemodelout.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ef6a1671c739 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class DeleteModelOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted model.""" + object: NotRequired[str] + r"""The object type that was deleted""" + deleted: NotRequired[bool] + r"""The deletion status""" + + +class DeleteModelOut(BaseModel): + id: str + r"""The ID of the deleted model.""" + + object: Optional[str] = "model" + r"""The object type that was deleted""" + + deleted: Optional[bool] = True + r"""The deletion status""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "deleted"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py new file mode 100644 index 00000000..d9fa230e --- /dev/null +++ b/src/mistralai/client/models/deltamessage.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 68f53d67a140 + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[DeltaMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/document.py b/src/mistralai/client/models/document.py new file mode 100644 index 00000000..31eebbd1 --- /dev/null +++ b/src/mistralai/client/models/document.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fbbf7428328c + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class DocumentTypedDict(TypedDict): + id: str + library_id: str + hash: Nullable[str] + mime_type: Nullable[str] + extension: Nullable[str] + size: Nullable[int] + name: str + created_at: datetime + uploaded_by_id: Nullable[str] + uploaded_by_type: str + processing_status: str + tokens_processing_total: int + summary: NotRequired[Nullable[str]] + last_processed_at: NotRequired[Nullable[datetime]] + number_of_pages: NotRequired[Nullable[int]] + tokens_processing_main_content: NotRequired[Nullable[int]] + tokens_processing_summary: NotRequired[Nullable[int]] + url: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, Any]]] + + +class Document(BaseModel): + id: str + + library_id: str + + hash: Nullable[str] + + mime_type: Nullable[str] + + extension: Nullable[str] + + size: Nullable[int] + + name: str + + created_at: datetime + + uploaded_by_id: Nullable[str] + + uploaded_by_type: str + + processing_status: str + + tokens_processing_total: int + + summary: OptionalNullable[str] = UNSET + + last_processed_at: OptionalNullable[datetime] = UNSET + + number_of_pages: OptionalNullable[int] = UNSET + + tokens_processing_main_content: OptionalNullable[int] = UNSET + + tokens_processing_summary: OptionalNullable[int] = UNSET + + url: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "summary", + "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + 
"tokens_processing_summary", + "url", + "attributes", + ] + ) + nullable_fields = set( + [ + "hash", + "mime_type", + "extension", + "size", + "summary", + "last_processed_at", + "number_of_pages", + "uploaded_by_id", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py new file mode 100644 index 00000000..642c3202 --- /dev/null +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3eb3c218f457 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentLibraryToolTypedDict(TypedDict): + library_ids: List[str] + r"""Ids of the library in which to search.""" + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["document_library"] + + +class DocumentLibraryTool(BaseModel): + library_ids: List[str] + r"""Ids of the library in which to search.""" + + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["document_library"], + AfterValidator(validate_const("document_library")), + ], + pydantic.Field(alias="type"), + ] = "document_library" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + DocumentLibraryTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models/documenttextcontent.py b/src/mistralai/client/models/documenttextcontent.py similarity index 77% rename from src/mistralai/models/documenttextcontent.py 
rename to src/mistralai/client/models/documenttextcontent.py index c02528c2..b6904cb4 100644 --- a/src/mistralai/models/documenttextcontent.py +++ b/src/mistralai/client/models/documenttextcontent.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e730005e44cb from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing_extensions import TypedDict diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py new file mode 100644 index 00000000..43444d98 --- /dev/null +++ b/src/mistralai/client/models/documenturlchunk.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4309807f6048 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + type: Literal["document_url"] + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + + +class DocumentURLChunk(BaseModel): + document_url: str + + type: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "document_name"]) + nullable_fields = 
set(["document_name"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + DocumentURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/embeddingdtype.py b/src/mistralai/client/models/embeddingdtype.py new file mode 100644 index 00000000..732c4ebe --- /dev/null +++ b/src/mistralai/client/models/embeddingdtype.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77f9526a78df + +from __future__ import annotations +from typing import Literal + + +EmbeddingDtype = Literal[ + "float", + "int8", + "uint8", + "binary", + "ubinary", +] diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py new file mode 100644 index 00000000..15950590 --- /dev/null +++ b/src/mistralai/client/models/embeddingrequest.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eadbe3f9040c + +from __future__ import annotations +from .embeddingdtype import EmbeddingDtype +from .encodingformat import EncodingFormat +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +EmbeddingRequestInputsTypedDict = TypeAliasType( + "EmbeddingRequestInputsTypedDict", Union[str, List[str]] +) +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + +EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + +class EmbeddingRequestTypedDict(TypedDict): + model: str + r"""The ID of the model to be used for embedding.""" + inputs: EmbeddingRequestInputsTypedDict + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + output_dimension: NotRequired[Nullable[int]] + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" + output_dtype: NotRequired[EmbeddingDtype] + encoding_format: NotRequired[EncodingFormat] + + +class EmbeddingRequest(BaseModel): + model: str + r"""The ID of the model to be used for embedding.""" + + inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + output_dimension: OptionalNullable[int] = UNSET + r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" + + output_dtype: Optional[EmbeddingDtype] = None + + encoding_format: Optional[EncodingFormat] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["metadata", "output_dimension", "output_dtype", "encoding_format"] + ) + nullable_fields = set(["metadata", "output_dimension"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + EmbeddingRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/client/models/embeddingresponse.py similarity index 89% rename from src/mistralai/models/embeddingresponse.py rename to src/mistralai/client/models/embeddingresponse.py index aae6fa60..6ffd6894 100644 --- a/src/mistralai/models/embeddingresponse.py +++ b/src/mistralai/client/models/embeddingresponse.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f7d790e84b65 from __future__ import annotations from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py new file mode 100644 index 00000000..098cfae0 --- /dev/null +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d6ead6f3803 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class EmbeddingResponseDataTypedDict(TypedDict): + object: NotRequired[str] + embedding: NotRequired[List[float]] + index: NotRequired[int] + + +class EmbeddingResponseData(BaseModel): + object: Optional[str] = None + + embedding: Optional[List[float]] = None + + index: Optional[int] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "embedding", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/encodingformat.py b/src/mistralai/client/models/encodingformat.py new file mode 100644 index 00000000..4a39d029 --- /dev/null +++ b/src/mistralai/client/models/encodingformat.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b51ec296cc92 + +from __future__ import annotations +from typing import Literal + + +EncodingFormat = Literal[ + "float", + "base64", +] diff --git a/src/mistralai/client/models/entitytype.py b/src/mistralai/client/models/entitytype.py new file mode 100644 index 00000000..56d82cbe --- /dev/null +++ b/src/mistralai/client/models/entitytype.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 62d6a6a13288 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +EntityType = Union[ + Literal[ + "User", + "Workspace", + "Org", + ], + UnrecognizedStr, +] +r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/client/models/event.py b/src/mistralai/client/models/event.py new file mode 100644 index 00000000..c40ae2b1 --- /dev/null +++ b/src/mistralai/client/models/event.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e5a68ac2dd57 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class EventTypedDict(TypedDict): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: NotRequired[Nullable[Dict[str, Any]]] + + +class Event(BaseModel): + name: str + r"""The name of the event.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + data: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data"]) + nullable_fields = set(["data"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py new file mode 100644 index 00000000..1b0ea1d4 --- /dev/null +++ b/src/mistralai/client/models/file.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f972c39edfcf + +from __future__ import annotations +import io +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from pydantic import model_serializer +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[ + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["contentType"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py new file mode 100644 index 00000000..5c8d2646 --- /dev/null +++ b/src/mistralai/client/models/filechunk.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ff3c2d33ab1e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + type: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + FileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/filepurpose.py b/src/mistralai/client/models/filepurpose.py new file mode 100644 index 00000000..49a5568f --- /dev/null +++ b/src/mistralai/client/models/filepurpose.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a11e7f9f2d45 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FilePurpose = Union[ + Literal[ + "fine-tune", + "batch", + "ocr", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/files_api_routes_delete_fileop.py b/src/mistralai/client/models/files_api_routes_delete_fileop.py new file mode 100644 index 00000000..eaba274b --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_delete_fileop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f385cc6138f + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDeleteFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_download_fileop.py b/src/mistralai/client/models/files_api_routes_download_fileop.py new file mode 100644 index 00000000..83de8e73 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_download_fileop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8184ee3577c3 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDownloadFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py new file mode 100644 index 00000000..64cd6ac5 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0a1a18c6431e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): + file_id: str + expiry: NotRequired[int] + r"""Number of hours before the url becomes invalid. Defaults to 24h""" + + +class FilesAPIRoutesGetSignedURLRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + expiry: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 24 + r"""Number of hours before the url becomes invalid. 
Defaults to 24h""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["expiry"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/files_api_routes_list_filesop.py b/src/mistralai/client/models/files_api_routes_list_filesop.py new file mode 100644 index 00000000..b03e2f88 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_list_filesop.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b2e92f2a29b4 + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + include_total: NotRequired[bool] + sample_type: NotRequired[Nullable[List[SampleType]]] + source: NotRequired[Nullable[List[Source]]] + search: NotRequired[Nullable[str]] + purpose: NotRequired[Nullable[FilePurpose]] + mimetypes: NotRequired[Nullable[List[str]]] + + +class FilesAPIRoutesListFilesRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + include_total: Annotated[ + Optional[bool], + 
FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = True + + sample_type: Annotated[ + OptionalNullable[List[SampleType]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + source: Annotated[ + OptionalNullable[List[Source]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + purpose: Annotated[ + OptionalNullable[FilePurpose], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + mimetypes: Annotated[ + OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "include_total", + "sample_type", + "source", + "search", + "purpose", + "mimetypes", + ] + ) + nullable_fields = set( + ["sample_type", "source", "search", "purpose", "mimetypes"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py new file mode 100644 index 00000000..5f8de05f --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5d5dbb8d5f7a + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesRetrieveFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py new file mode 100644 index 00000000..54ff4e49 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f13b84de6fa7 + +from __future__ import annotations +from .file import File, FileTypedDict +from .filepurpose import FilePurpose +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class MultiPartBodyParamsTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + purpose: NotRequired[FilePurpose] + + +class MultiPartBodyParams(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["purpose"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py new file mode 100644 index 00000000..e99066a9 --- /dev/null +++ b/src/mistralai/client/models/fileschema.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 19cde41ca32a + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileSchemaTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class FileSchema(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FileSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py new file mode 100644 index 00000000..ea877213 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -0,0 +1,130 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf3558adc3ab + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fimcompletionresponse.py b/src/mistralai/client/models/fimcompletionresponse.py new file mode 100644 index 00000000..1345a116 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionresponse.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b860d2ba771e + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class FIMCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..e80efc09 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1d1ee09f1913 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/finetuneablemodeltype.py b/src/mistralai/client/models/finetuneablemodeltype.py new file mode 100644 index 00000000..7b924bd7 --- /dev/null +++ b/src/mistralai/client/models/finetuneablemodeltype.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 05e097395df3 + +from __future__ import annotations +from typing import Literal + + +FineTuneableModelType = Literal[ + "completion", + "classifier", +] diff --git a/src/mistralai/client/models/finetunedmodelcapabilities.py b/src/mistralai/client/models/finetunedmodelcapabilities.py new file mode 100644 index 00000000..2f4cca0b --- /dev/null +++ b/src/mistralai/client/models/finetunedmodelcapabilities.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 475c805eab95 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class FineTunedModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + classification: NotRequired[bool] + + +class FineTunedModelCapabilities(BaseModel): + completion_chat: Optional[bool] = True + + completion_fim: Optional[bool] = False + + function_calling: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "completion_fim", + "function_calling", + "fine_tuning", + "classification", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py new file mode 100644 index 00000000..ccb0f21b --- /dev/null +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d21e2a36ab1f + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FTClassifierLossFunction = Union[ + Literal[ + "single_class", + "multi_class", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py new file mode 100644 index 00000000..2c26ff2f --- /dev/null +++ b/src/mistralai/client/models/ftmodelcard.py @@ -0,0 +1,132 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4f15eed2ca2 + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FTModelCardTypedDict(TypedDict): + r"""Extra fields for fine-tuned models.""" + + id: str + capabilities: ModelCapabilitiesTypedDict + job: str + root: str + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: Literal["fine-tuned"] + archived: NotRequired[bool] + + +class FTModelCard(BaseModel): + r"""Extra fields for fine-tuned models.""" + + id: str + + capabilities: ModelCapabilities + + job: str + + root: str + + object: 
Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + type: Annotated[ + Annotated[Literal["fine-tuned"], AfterValidator(validate_const("fine-tuned"))], + pydantic.Field(alias="type"), + ] = "fine-tuned" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + "archived", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FTModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py new file mode 100644 index 00000000..1da1dcc9 --- /dev/null +++ b/src/mistralai/client/models/function.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 32275a9d8fee + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/functioncall.py b/src/mistralai/client/models/functioncall.py new file mode 100644 index 00000000..527c3ad4 --- /dev/null +++ b/src/mistralai/client/models/functioncall.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 393fca552632 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py new file mode 100644 index 00000000..d05fad85 --- /dev/null +++ b/src/mistralai/client/models/functioncallentry.py @@ -0,0 +1,124 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cd058446c0aa + +from __future__ import annotations +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +FunctionCallEntryConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] + + +class FunctionCallEntryTypedDict(TypedDict): + tool_call_id: str + name: str + arguments: FunctionCallEntryArgumentsTypedDict + object: Literal["entry"] + type: Literal["function.call"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: 
NotRequired[Nullable[str]] + id: NotRequired[str] + confirmation_status: NotRequired[Nullable[FunctionCallEntryConfirmationStatus]] + + +class FunctionCallEntry(BaseModel): + tool_call_id: str + + name: str + + arguments: FunctionCallEntryArguments + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["function.call"]], + AfterValidator(validate_const("function.call")), + ], + pydantic.Field(alias="type"), + ] = "function.call" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + id: Optional[str] = None + + confirmation_status: OptionalNullable[FunctionCallEntryConfirmationStatus] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "confirmation_status", + ] + ) + nullable_fields = set( + ["completed_at", "agent_id", "model", "confirmation_status"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionCallEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models/functioncallentryarguments.py b/src/mistralai/client/models/functioncallentryarguments.py similarity index 93% rename from src/mistralai/models/functioncallentryarguments.py rename to src/mistralai/client/models/functioncallentryarguments.py index ac9e6227..afe81b24 
100644 --- a/src/mistralai/models/functioncallentryarguments.py +++ b/src/mistralai/client/models/functioncallentryarguments.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3df3767a7b93 from __future__ import annotations from typing import Any, Dict, Union diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py new file mode 100644 index 00000000..849eed76 --- /dev/null +++ b/src/mistralai/client/models/functioncallevent.py @@ -0,0 +1,103 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 23b120b8f122 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +FunctionCallEventConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] + + +class FunctionCallEventTypedDict(TypedDict): + id: str + name: str + tool_call_id: str + arguments: str + type: Literal["function.call.delta"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + confirmation_status: NotRequired[Nullable[FunctionCallEventConfirmationStatus]] + + +class FunctionCallEvent(BaseModel): + id: str + + name: str + + tool_call_id: str + + arguments: str + + type: Annotated[ + Annotated[ + Literal["function.call.delta"], + AfterValidator(validate_const("function.call.delta")), + ], + pydantic.Field(alias="type"), 
+ ] = "function.call.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + confirmation_status: OptionalNullable[FunctionCallEventConfirmationStatus] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["created_at", "output_index", "model", "agent_id", "confirmation_status"] + ) + nullable_fields = set(["model", "agent_id", "confirmation_status"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionCallEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functionname.py b/src/mistralai/client/models/functionname.py new file mode 100644 index 00000000..07d98a0e --- /dev/null +++ b/src/mistralai/client/models/functionname.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 000acafdb0c0 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py new file mode 100644 index 00000000..01e2e36f --- /dev/null +++ b/src/mistralai/client/models/functionresultentry.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 213df39bd5e6 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FunctionResultEntryTypedDict(TypedDict): + tool_call_id: str + result: str + object: Literal["entry"] + type: Literal["function.result"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionResultEntry(BaseModel): + tool_call_id: str + + result: str + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["function.result"]], + AfterValidator(validate_const("function.result")), + ], + pydantic.Field(alias="type"), + ] = "function.result" + + created_at: 
Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionResultEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py new file mode 100644 index 00000000..eae87264 --- /dev/null +++ b/src/mistralai/client/models/functiontool.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2e9ef5800117 + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class FunctionToolTypedDict(TypedDict): + function: FunctionTypedDict + type: Literal["function"] + + +class FunctionTool(BaseModel): + function: Function + + type: Annotated[ + Annotated[Literal["function"], AfterValidator(validate_const("function"))], + pydantic.Field(alias="type"), + ] = "function" + + +try: + FunctionTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getfileresponse.py b/src/mistralai/client/models/getfileresponse.py new file mode 100644 index 00000000..f625c153 --- /dev/null +++ b/src/mistralai/client/models/getfileresponse.py @@ -0,0 +1,99 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81919086e371 + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetFileResponseTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + deleted: bool + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class GetFileResponse(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + deleted: bool + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + GetFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getsignedurlresponse.py b/src/mistralai/client/models/getsignedurlresponse.py new file mode 100644 index 00000000..4ba95894 --- /dev/null +++ b/src/mistralai/client/models/getsignedurlresponse.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cee4e4197372 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class GetSignedURLResponseTypedDict(TypedDict): + url: str + + +class GetSignedURLResponse(BaseModel): + url: str diff --git a/src/mistralai/client/models/githubrepository.py b/src/mistralai/client/models/githubrepository.py new file mode 100644 index 00000000..84b01078 --- /dev/null +++ b/src/mistralai/client/models/githubrepository.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4bc83ce18378 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GithubRepositoryTypedDict(TypedDict): + name: str + owner: str + commit_id: str + type: Literal["github"] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepository(BaseModel): + name: str + + owner: str + + commit_id: str + + type: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + GithubRepository.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py new file mode 100644 index 00000000..38bcc208 --- /dev/null +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eef26fbd2876 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GithubRepositoryInTypedDict(TypedDict): + name: str + owner: str + token: str + type: Literal["github"] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryIn(BaseModel): + name: str + + owner: str + + token: str + + type: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + GithubRepositoryIn.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imagedetail.py b/src/mistralai/client/models/imagedetail.py new file mode 100644 index 00000000..1982d357 --- /dev/null +++ b/src/mistralai/client/models/imagedetail.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c1084b549abb + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py new file mode 100644 index 00000000..c1789b18 --- /dev/null +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e1532275faa0 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ImageGenerationToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["image_generation"] + + +class ImageGenerationTool(BaseModel): + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["image_generation"], + AfterValidator(validate_const("image_generation")), + ], + pydantic.Field(alias="type"), + ] = "image_generation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ImageGenerationTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py new file mode 100644 index 00000000..ac1030f5 --- /dev/null +++ b/src/mistralai/client/models/imageurl.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4bbf5881fbf + +from __future__ import annotations +from .imagedetail import ImageDetail +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[ImageDetail]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[ImageDetail] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py new file mode 100644 index 00000000..7134b46e --- /dev/null +++ b/src/mistralai/client/models/imageurlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 746fde62f637 + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + type: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/client/models/inputentries.py similarity index 97% rename from src/mistralai/models/inputentries.py rename to src/mistralai/client/models/inputentries.py index 8ae29837..e2da5a80 100644 --- a/src/mistralai/models/inputentries.py +++ 
b/src/mistralai/client/models/inputentries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 44727997dacb from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict @@ -16,10 +17,10 @@ Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) @@ -29,9 +30,9 @@ Union[ FunctionResultEntry, MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, ], ) diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py new file mode 100644 index 00000000..9ecd7f48 --- /dev/null +++ b/src/mistralai/client/models/inputs.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 84a8007518c7 + +from __future__ import annotations +from .instructrequest import InstructRequest, InstructRequestTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +InputsTypedDict = TypeAliasType( + "InputsTypedDict", Union[InstructRequestTypedDict, List[InstructRequestTypedDict]] +) +r"""Chat to classify""" + + +Inputs = TypeAliasType("Inputs", Union[InstructRequest, List[InstructRequest]]) +r"""Chat to classify""" diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py new file mode 100644 index 00000000..e5f9cccf --- /dev/null +++ b/src/mistralai/client/models/instructrequest.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6d3ad9f896c7 + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestMessageTypedDict = TypeAliasType( + "InstructRequestMessageTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestMessage = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestTypedDict(TypedDict): + messages: List[InstructRequestMessageTypedDict] + + +class InstructRequest(BaseModel): + messages: List[InstructRequestMessage] diff --git a/src/mistralai/client/models/jobmetadata.py b/src/mistralai/client/models/jobmetadata.py new file mode 100644 index 00000000..f6e96fa1 --- /dev/null +++ b/src/mistralai/client/models/jobmetadata.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cfbdde7fc0a2 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class JobMetadataTypedDict(TypedDict): + expected_duration_seconds: NotRequired[Nullable[int]] + cost: NotRequired[Nullable[float]] + cost_currency: NotRequired[Nullable[str]] + train_tokens_per_step: NotRequired[Nullable[int]] + train_tokens: NotRequired[Nullable[int]] + data_tokens: NotRequired[Nullable[int]] + estimated_start_time: NotRequired[Nullable[int]] + + +class JobMetadata(BaseModel): + expected_duration_seconds: OptionalNullable[int] = UNSET + + cost: OptionalNullable[float] = UNSET + + cost_currency: OptionalNullable[str] = UNSET + + train_tokens_per_step: OptionalNullable[int] = UNSET + + train_tokens: OptionalNullable[int] = UNSET + + data_tokens: OptionalNullable[int] = UNSET + + estimated_start_time: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git 
a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py new file mode 100644 index 00000000..de2e6347 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b56cb6c17c95 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py new file mode 100644 index 00000000..d779e1d9 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 36b5a6b3ceee + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): + job_id: str + inline: NotRequired[Nullable[bool]] + + +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + inline: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["inline"]) + nullable_fields = set(["inline"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py new file mode 100644 index 00000000..89ac3c93 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -0,0 +1,121 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d8f0af99c94d + +from __future__ import annotations +from .batchjobstatus import BatchJobStatus +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +OrderBy = Literal[ + "created", + "-created", +] + + +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + created_after: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + status: NotRequired[Nullable[List[BatchJobStatus]]] + order_by: NotRequired[OrderBy] + + +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + agent_id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + + status: Annotated[ + 
OptionalNullable[List[BatchJobStatus]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + order_by: Annotated[ + Optional[OrderBy], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "-created" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "model", + "agent_id", + "metadata", + "created_after", + "created_by_me", + "status", + "order_by", + ] + ) + nullable_fields = set( + ["model", "agent_id", "metadata", "created_after", "status"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py similarity index 78% rename from src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index d728efd1..9fa99837 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 34f89d2af0ec from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..56fa5340 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d175c6e32ecb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + 
"JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningCancelFineTuningJobResponse variant the SDK doesn't recognize. Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..db857f7d --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81651291187a + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ResponseTypedDict = TypeAliasType( + "ResponseTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownResponse(BaseModel): + r"""A Response variant the SDK doesn't recognize. Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +Response = Annotated[ + Union[ClassifierFineTuningJob, CompletionFineTuningJob, UnknownResponse], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_RESPONSE_VARIANTS, + unknown_cls=UnknownResponse, + union_name="Response", + ) + ), +] + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataTypedDict, ResponseTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadata, Response], +) +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py 
b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..ddd9c189 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d910fd8fe2d6 + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningGetFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningGetFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py new file mode 100644 index 00000000..ec80a158 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -0,0 +1,162 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cf43028824bf + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current job state to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""The page number of the results to be returned.""" + page_size: NotRequired[int] + r"""The number of items to return per page.""" + model: NotRequired[Nullable[str]] + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: NotRequired[Nullable[datetime]] + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: NotRequired[Nullable[str]] + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: NotRequired[Nullable[str]] + r"""The Weight and Biases run name to filter on. 
When set, the other results are not displayed.""" + suffix: NotRequired[Nullable[str]] + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""The page number of the results to be returned.""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + r"""The number of items to return per page.""" + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + + created_before: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + + status: Annotated[ + OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The current job state to filter on. When set, the other results are not displayed.""" + + wandb_project: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weights and Biases project to filter on. 
When set, the other results are not displayed.""" + + wandb_name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + + suffix: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "page", + "page_size", + "model", + "created_after", + "created_before", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) + nullable_fields = set( + [ + "model", + "created_after", + "created_before", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..cd25fa04 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e7ff4a4a4edb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningStartFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningStartFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py similarity index 78% rename from src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index a84274ff..fd01fe69 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7cc1c80335a9 from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..296070b4 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d9dc624aafd + +from __future__ import annotations +from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, +) +from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, +) +from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_model_request: UpdateModelRequestTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the 
model to update.""" + + update_model_request: Annotated[ + UpdateModelRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFineTunedModelTypedDict, ClassifierFineTunedModelTypedDict], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningUpdateFineTunedModelResponse variant the SDK doesn't recognize. Preserves the raw payload.""" + + model_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTunedModel, + "completion": CompletionFineTunedModel, +} + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + ClassifierFineTunedModel, + CompletionFineTunedModel, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="model_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + union_name="JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py new file mode 100644 index 00000000..dfababa6 --- /dev/null +++ b/src/mistralai/client/models/jsonschema.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e1fc1d8a434a + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/legacyjobmetadata.py b/src/mistralai/client/models/legacyjobmetadata.py new file mode 100644 index 00000000..57576758 --- /dev/null +++ b/src/mistralai/client/models/legacyjobmetadata.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0330b8930f65 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LegacyJobMetadataTypedDict(TypedDict): + details: str + expected_duration_seconds: NotRequired[Nullable[int]] + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: NotRequired[Nullable[float]] + r"""The cost of the fine-tuning job.""" + cost_currency: NotRequired[Nullable[str]] + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: NotRequired[Nullable[int]] + r"""The number of tokens consumed by one training step.""" + train_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens in the training dataset.""" + estimated_start_time: NotRequired[Nullable[int]] + deprecated: NotRequired[bool] + epochs: NotRequired[Nullable[float]] + r"""The number of complete passes through the entire training dataset.""" + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + object: Literal["job.metadata"] + + +class LegacyJobMetadata(BaseModel): + details: str + + expected_duration_seconds: OptionalNullable[int] = UNSET + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + + cost: OptionalNullable[float] = UNSET + r"""The cost of the fine-tuning job.""" + + cost_currency: OptionalNullable[str] = UNSET + r"""The currency used for the fine-tuning job cost.""" + + train_tokens_per_step: OptionalNullable[int] = UNSET + r"""The number of tokens consumed by one training step.""" + + train_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens used during the fine-tuning process.""" + + data_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens in the training dataset.""" + + estimated_start_time: OptionalNullable[int] = UNSET + + deprecated: Optional[bool] = True + + epochs: OptionalNullable[float] = UNSET + r"""The number of complete passes through the entire training dataset.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + + object: Annotated[ + Annotated[ + Optional[Literal["job.metadata"]], + AfterValidator(validate_const("job.metadata")), + ], + pydantic.Field(alias="object"), + ] = "job.metadata" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + LegacyJobMetadata.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/libraries_delete_v1op.py b/src/mistralai/client/models/libraries_delete_v1op.py new file mode 100644 index 00000000..893ab53b --- /dev/null +++ b/src/mistralai/client/models/libraries_delete_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b2e8bbd19baa + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDeleteV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_delete_v1op.py b/src/mistralai/client/models/libraries_documents_delete_v1op.py similarity index 79% rename from src/mistralai/models/libraries_documents_delete_v1op.py rename to src/mistralai/client/models/libraries_documents_delete_v1op.py index c33710b0..0495832e 100644 --- a/src/mistralai/models/libraries_documents_delete_v1op.py +++ b/src/mistralai/client/models/libraries_documents_delete_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81eb34382a3d from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py similarity index 80% rename from src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py rename to src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py index e2459c1c..186baaed 100644 --- a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py +++ b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a7417ebd6040 from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py similarity index 79% rename from src/mistralai/models/libraries_documents_get_signed_url_v1op.py rename to src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py index bc913ba5..ebcf85d7 100644 --- a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py +++ b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d4b7b47913ba from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/libraries_documents_get_status_v1op.py b/src/mistralai/client/models/libraries_documents_get_status_v1op.py similarity index 79% rename from src/mistralai/models/libraries_documents_get_status_v1op.py rename to src/mistralai/client/models/libraries_documents_get_status_v1op.py index 08992d7c..1f484787 100644 --- a/src/mistralai/models/libraries_documents_get_status_v1op.py +++ b/src/mistralai/client/models/libraries_documents_get_status_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f314f73e909c from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py similarity index 79% rename from src/mistralai/models/libraries_documents_get_text_content_v1op.py rename to src/mistralai/client/models/libraries_documents_get_text_content_v1op.py index 21a131ad..e0508d66 100644 --- a/src/mistralai/models/libraries_documents_get_text_content_v1op.py +++ b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1ca4e0c41321 from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/libraries_documents_get_v1op.py b/src/mistralai/client/models/libraries_documents_get_v1op.py similarity index 79% rename from src/mistralai/models/libraries_documents_get_v1op.py rename to src/mistralai/client/models/libraries_documents_get_v1op.py index ff2bdedb..857dfbe6 100644 --- a/src/mistralai/models/libraries_documents_get_v1op.py +++ b/src/mistralai/client/models/libraries_documents_get_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 26ff35f0c69d from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/libraries_documents_list_v1op.py b/src/mistralai/client/models/libraries_documents_list_v1op.py new file mode 100644 index 00000000..da7d793b --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_list_v1op.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 756f26de3cbe + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): + library_id: str + search: NotRequired[Nullable[str]] + page_size: NotRequired[int] + page: NotRequired[int] + filters_attributes: NotRequired[Nullable[str]] + sort_by: NotRequired[str] + sort_order: NotRequired[str] + + +class LibrariesDocumentsListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + filters_attributes: Annotated[ 
+ OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sort_by: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "created_at" + + sort_order: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "desc" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + ) + nullable_fields = set(["search", "filters_attributes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/models/libraries_documents_reprocess_v1op.py b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py similarity index 79% rename from src/mistralai/models/libraries_documents_reprocess_v1op.py rename to src/mistralai/client/models/libraries_documents_reprocess_v1op.py index 861993e7..a2f9ba2a 100644 --- a/src/mistralai/models/libraries_documents_reprocess_v1op.py +++ b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: dbbeb02fc336 from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/libraries_documents_update_v1op.py b/src/mistralai/client/models/libraries_documents_update_v1op.py new file mode 100644 index 00000000..7ad4231f --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_update_v1op.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 734ba6c19f5f + +from __future__ import annotations +from .updatedocumentrequest import UpdateDocumentRequest, UpdateDocumentRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + update_document_request: UpdateDocumentRequestTypedDict + + +class LibrariesDocumentsUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_document_request: Annotated[ + UpdateDocumentRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py similarity index 82% rename from src/mistralai/models/libraries_documents_upload_v1op.py rename to src/mistralai/client/models/libraries_documents_upload_v1op.py index 51f536cc..388633d1 100644 --- 
a/src/mistralai/models/libraries_documents_upload_v1op.py +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 744466971862 from __future__ import annotations from .file import File, FileTypedDict -from mistralai.types import BaseModel -from mistralai.utils import ( +from mistralai.client.types import BaseModel +from mistralai.client.utils import ( FieldMetadata, MultipartFormMetadata, PathParamMetadata, @@ -12,7 +13,7 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): +class DocumentUploadTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: @@ -26,7 +27,7 @@ class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): """ -class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): +class DocumentUpload(BaseModel): file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: @@ -42,7 +43,7 @@ class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): library_id: str - request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict + request_body: DocumentUploadTypedDict class LibrariesDocumentsUploadV1Request(BaseModel): @@ -51,6 +52,6 @@ class LibrariesDocumentsUploadV1Request(BaseModel): ] request_body: Annotated[ - LibrariesDocumentsUploadV1DocumentUpload, + DocumentUpload, FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), ] diff --git a/src/mistralai/client/models/libraries_get_v1op.py b/src/mistralai/client/models/libraries_get_v1op.py new file mode 100644 index 00000000..7a51d605 --- /dev/null +++ b/src/mistralai/client/models/libraries_get_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d493f39e7ebb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesGetV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_share_create_v1op.py b/src/mistralai/client/models/libraries_share_create_v1op.py similarity index 79% rename from src/mistralai/models/libraries_share_create_v1op.py rename to src/mistralai/client/models/libraries_share_create_v1op.py index a8b0e35d..00ea7482 100644 --- a/src/mistralai/models/libraries_share_create_v1op.py +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: feaacfd46dd3 from __future__ import annotations from .sharingin import SharingIn, SharingInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/models/libraries_share_delete_v1op.py b/src/mistralai/client/models/libraries_share_delete_v1op.py similarity index 80% rename from src/mistralai/models/libraries_share_delete_v1op.py rename to src/mistralai/client/models/libraries_share_delete_v1op.py index e29d556a..eca3f86a 100644 --- a/src/mistralai/models/libraries_share_delete_v1op.py +++ b/src/mistralai/client/models/libraries_share_delete_v1op.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7f3a679ca384 from __future__ import annotations from .sharingdelete import SharingDelete, SharingDeleteTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict diff --git a/src/mistralai/client/models/libraries_share_list_v1op.py b/src/mistralai/client/models/libraries_share_list_v1op.py new file mode 100644 index 00000000..895a2590 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_list_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8f0af379bf1c + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareListV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesShareListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_update_v1op.py b/src/mistralai/client/models/libraries_update_v1op.py new file mode 100644 index 00000000..54b0ab70 --- /dev/null +++ b/src/mistralai/client/models/libraries_update_v1op.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 92c8d4132252 + +from __future__ import annotations +from .updatelibraryrequest import UpdateLibraryRequest, UpdateLibraryRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesUpdateV1RequestTypedDict(TypedDict): + library_id: str + update_library_request: UpdateLibraryRequestTypedDict + + +class LibrariesUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_library_request: Annotated[ + UpdateLibraryRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/library.py b/src/mistralai/client/models/library.py new file mode 100644 index 00000000..1953b6fb --- /dev/null +++ b/src/mistralai/client/models/library.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 028a34b08f9c + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryTypedDict(TypedDict): + id: str + name: str + created_at: datetime + updated_at: datetime + owner_id: Nullable[str] + owner_type: str + total_size: int + nb_documents: int + chunk_size: Nullable[int] + emoji: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + generated_description: NotRequired[Nullable[str]] + explicit_user_members_count: NotRequired[Nullable[int]] + explicit_workspace_members_count: NotRequired[Nullable[int]] + org_sharing_role: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + r"""Generated Name""" + + +class Library(BaseModel): + id: str + + name: str + + created_at: datetime + + updated_at: datetime + + owner_id: Nullable[str] + + owner_type: str + + total_size: int + + nb_documents: int + + chunk_size: Nullable[int] + + emoji: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + generated_description: OptionalNullable[str] = UNSET + + explicit_user_members_count: OptionalNullable[int] = UNSET + + explicit_workspace_members_count: OptionalNullable[int] = UNSET + + org_sharing_role: OptionalNullable[str] = UNSET + + generated_name: OptionalNullable[str] = UNSET + r"""Generated Name""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + ) + nullable_fields = set( + [ + "owner_id", + "chunk_size", + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + 
"org_sharing_role", + "generated_name", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listbatchjobsresponse.py b/src/mistralai/client/models/listbatchjobsresponse.py new file mode 100644 index 00000000..35a348a1 --- /dev/null +++ b/src/mistralai/client/models/listbatchjobsresponse.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 99d94c86a871 + +from __future__ import annotations +from .batchjob import BatchJob, BatchJobTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ListBatchJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobTypedDict]] + object: Literal["list"] + + +class ListBatchJobsResponse(BaseModel): + total: int + + data: Optional[List[BatchJob]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or 
k not in optional_fields: + m[k] = val + + return m + + +try: + ListBatchJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listdocumentsresponse.py b/src/mistralai/client/models/listdocumentsresponse.py new file mode 100644 index 00000000..c48b8c05 --- /dev/null +++ b/src/mistralai/client/models/listdocumentsresponse.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f593d8e66833 + +from __future__ import annotations +from .document import Document, DocumentTypedDict +from .paginationinfo import PaginationInfo, PaginationInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListDocumentsResponseTypedDict(TypedDict): + pagination: PaginationInfoTypedDict + data: List[DocumentTypedDict] + + +class ListDocumentsResponse(BaseModel): + pagination: PaginationInfo + + data: List[Document] diff --git a/src/mistralai/client/models/listfilesresponse.py b/src/mistralai/client/models/listfilesresponse.py new file mode 100644 index 00000000..10a60126 --- /dev/null +++ b/src/mistralai/client/models/listfilesresponse.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85d6d24c1a19 + +from __future__ import annotations +from .fileschema import FileSchema, FileSchemaTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ListFilesResponseTypedDict(TypedDict): + data: List[FileSchemaTypedDict] + object: str + total: NotRequired[Nullable[int]] + + +class ListFilesResponse(BaseModel): + data: List[FileSchema] + + object: str + + total: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["total"]) + nullable_fields = set(["total"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listfinetuningjobsresponse.py b/src/mistralai/client/models/listfinetuningjobsresponse.py new file mode 100644 index 00000000..1e434c59 --- /dev/null +++ b/src/mistralai/client/models/listfinetuningjobsresponse.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 118e05dbfbbd + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ListFineTuningJobsResponseDataTypedDict = TypeAliasType( + "ListFineTuningJobsResponseDataTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownListFineTuningJobsResponseData(BaseModel): + r"""A ListFineTuningJobsResponseData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +ListFineTuningJobsResponseData = Annotated[ + Union[ + ClassifierFineTuningJob, + CompletionFineTuningJob, + UnknownListFineTuningJobsResponseData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS, + unknown_cls=UnknownListFineTuningJobsResponseData, + union_name="ListFineTuningJobsResponseData", + ) + ), +] + + +class ListFineTuningJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[ListFineTuningJobsResponseDataTypedDict]] + object: Literal["list"] + + +class ListFineTuningJobsResponse(BaseModel): + total: int + + data: Optional[List[ListFineTuningJobsResponseData]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ListFineTuningJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listlibrariesresponse.py b/src/mistralai/client/models/listlibrariesresponse.py new file mode 100644 index 00000000..337fe105 --- /dev/null +++ b/src/mistralai/client/models/listlibrariesresponse.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: df556a618365 + +from __future__ import annotations +from .library import Library, LibraryTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibrariesResponseTypedDict(TypedDict): + data: List[LibraryTypedDict] + + +class ListLibrariesResponse(BaseModel): + data: List[Library] diff --git a/src/mistralai/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py similarity index 83% rename from src/mistralai/models/listsharingout.py rename to src/mistralai/client/models/listsharingout.py index 38c0dbe0..443ad0d6 100644 --- a/src/mistralai/models/listsharingout.py +++ b/src/mistralai/client/models/listsharingout.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ee708a7ccdad from __future__ import annotations from .sharingout import SharingOut, SharingOutTypedDict -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/src/mistralai/models/messageentries.py b/src/mistralai/client/models/messageentries.py similarity index 95% rename from src/mistralai/models/messageentries.py rename to src/mistralai/client/models/messageentries.py index 9b1706de..a95098e0 100644 --- a/src/mistralai/models/messageentries.py +++ b/src/mistralai/client/models/messageentries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e13f9009902b from __future__ import annotations from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py new file mode 100644 index 00000000..1e04ce24 --- /dev/null +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 01025c12866a + +from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageInputContentChunksTypedDict = TypeAliasType( + "MessageInputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ConversationThinkChunkTypedDict, + ToolFileChunkTypedDict, + ], +) + + +MessageInputContentChunks = TypeAliasType( + "MessageInputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ], +) diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py new file mode 100644 index 00000000..c948a13e --- /dev/null +++ b/src/mistralai/client/models/messageinputentry.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c0a4b5179095 + +from __future__ import annotations +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +Role = Union[ + Literal[ + "assistant", + "user", + ], + UnrecognizedStr, +] + + +MessageInputEntryContentTypedDict = TypeAliasType( + "MessageInputEntryContentTypedDict", + Union[str, List[MessageInputContentChunksTypedDict]], +) + + +MessageInputEntryContent = TypeAliasType( + "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] +) + + +class MessageInputEntryTypedDict(TypedDict): + r"""Representation of an input message inside the conversation.""" + + role: Role + content: MessageInputEntryContentTypedDict + object: Literal["entry"] + type: Literal["message.input"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + prefix: NotRequired[bool] + + +class MessageInputEntry(BaseModel): + r"""Representation of an input message inside the conversation.""" + + role: Role + + content: MessageInputEntryContent + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["message.input"]], + AfterValidator(validate_const("message.input")), + ], + pydantic.Field(alias="type"), + ] = "message.input" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: 
Optional[str] = None + + prefix: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["object", "type", "created_at", "completed_at", "id", "prefix"] + ) + nullable_fields = set(["completed_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + MessageInputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py new file mode 100644 index 00000000..bf455d17 --- /dev/null +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2ed248515035 + +from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageOutputContentChunksTypedDict = TypeAliasType( + "MessageOutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ConversationThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +MessageOutputContentChunks = TypeAliasType( + "MessageOutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py new file mode 100644 index 00000000..6a9c52ed --- /dev/null +++ b/src/mistralai/client/models/messageoutputentry.py @@ -0,0 +1,121 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a07577d2268d + +from __future__ import annotations +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +MessageOutputEntryContentTypedDict = TypeAliasType( + "MessageOutputEntryContentTypedDict", + Union[str, List[MessageOutputContentChunksTypedDict]], +) + + +MessageOutputEntryContent = TypeAliasType( + "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] +) + + +class MessageOutputEntryTypedDict(TypedDict): + content: MessageOutputEntryContentTypedDict + object: Literal["entry"] + type: Literal["message.output"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + id: NotRequired[str] + role: Literal["assistant"] + + +class MessageOutputEntry(BaseModel): + content: MessageOutputEntryContent + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["message.output"]], + AfterValidator(validate_const("message.output")), + ], + pydantic.Field(alias="type"), + ] = "message.output" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + id: Optional[str] = None + + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], 
AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "role", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + MessageOutputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py new file mode 100644 index 00000000..d765f4fd --- /dev/null +++ b/src/mistralai/client/models/messageoutputevent.py @@ -0,0 +1,105 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a2bbf63615c6 + +from __future__ import annotations +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +MessageOutputEventContentTypedDict = TypeAliasType( + "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] +) + + +MessageOutputEventContent = TypeAliasType( + "MessageOutputEventContent", Union[str, OutputContentChunks] +) + + +class MessageOutputEventTypedDict(TypedDict): + id: str + content: MessageOutputEventContentTypedDict + type: Literal["message.output.delta"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + content_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + role: Literal["assistant"] + + +class MessageOutputEvent(BaseModel): + id: str + + content: MessageOutputEventContent + + type: Annotated[ + Annotated[ + Literal["message.output.delta"], + AfterValidator(validate_const("message.output.delta")), + ], + pydantic.Field(alias="type"), + ] = "message.output.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + content_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["created_at", "output_index", 
"content_index", "model", "agent_id", "role"] + ) + nullable_fields = set(["model", "agent_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + MessageOutputEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/metric.py b/src/mistralai/client/models/metric.py new file mode 100644 index 00000000..1413f589 --- /dev/null +++ b/src/mistralai/client/models/metric.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c6a65acdd1a2 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class MetricTypedDict(TypedDict): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: NotRequired[Nullable[float]] + valid_loss: NotRequired[Nullable[float]] + valid_mean_token_accuracy: NotRequired[Nullable[float]] + + +class Metric(BaseModel): + r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: OptionalNullable[float] = UNSET + + valid_loss: OptionalNullable[float] = UNSET + + valid_mean_token_accuracy: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) + nullable_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/mistralpromptmode.py b/src/mistralai/client/models/mistralpromptmode.py new file mode 100644 index 00000000..9b91323e --- /dev/null +++ b/src/mistralai/client/models/mistralpromptmode.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 95abc4ec799a + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+""" diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py new file mode 100644 index 00000000..d9293ccc --- /dev/null +++ b/src/mistralai/client/models/modelcapabilities.py @@ -0,0 +1,72 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 64d8a422ea29 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + function_calling: NotRequired[bool] + completion_fim: NotRequired[bool] + fine_tuning: NotRequired[bool] + vision: NotRequired[bool] + ocr: NotRequired[bool] + classification: NotRequired[bool] + moderation: NotRequired[bool] + audio: NotRequired[bool] + audio_transcription: NotRequired[bool] + + +class ModelCapabilities(BaseModel): + completion_chat: Optional[bool] = False + + function_calling: Optional[bool] = False + + completion_fim: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + vision: Optional[bool] = False + + ocr: Optional[bool] = False + + classification: Optional[bool] = False + + moderation: Optional[bool] = False + + audio: Optional[bool] = False + + audio_transcription: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "function_calling", + "completion_fim", + "fine_tuning", + "vision", + "ocr", + "classification", + "moderation", + "audio", + "audio_transcription", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git 
a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py new file mode 100644 index 00000000..bb33d2e0 --- /dev/null +++ b/src/mistralai/client/models/modelconversation.py @@ -0,0 +1,179 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fea0a651f888 + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from functools import partial +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelConversationToolTypedDict = TypeAliasType( + "ModelConversationToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +class UnknownModelConversationTool(BaseModel): + r"""A ModelConversationTool variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_CONVERSATION_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + +ModelConversationTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + UnknownModelConversationTool, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_CONVERSATION_TOOL_VARIANTS, + unknown_cls=UnknownModelConversationTool, + union_name="ModelConversationTool", + ) + ), +] + + +class ModelConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + model: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[ModelConversationToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: Literal["conversation"] + + +class ModelConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + model: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[ModelConversationTool]] = None + r"""List of tools which are 
available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "object", + ] + ) + nullable_fields = set(["instructions", "name", "description", "metadata"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ModelConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py new file mode 100644 index 00000000..5fd835f2 --- /dev/null +++ b/src/mistralai/client/models/modellist.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 00693c7eec60 + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import BeforeValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelListDataTypedDict = TypeAliasType( + "ModelListDataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +) + + +class UnknownModelListData(BaseModel): + r"""A ModelListData variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_LIST_DATA_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + +ModelListData = Annotated[ + Union[BaseModelCard, FTModelCard, UnknownModelListData], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_LIST_DATA_VARIANTS, + unknown_cls=UnknownModelListData, + union_name="ModelListData", + ) + ), +] + + +class ModelListTypedDict(TypedDict): + object: NotRequired[str] + data: NotRequired[List[ModelListDataTypedDict]] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + + data: Optional[List[ModelListData]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "data"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git 
a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py new file mode 100644 index 00000000..e7ccd8f6 --- /dev/null +++ b/src/mistralai/client/models/moderationobject.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 132faad0549a + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ModerationObjectTypedDict(TypedDict): + categories: NotRequired[Dict[str, bool]] + r"""Moderation result thresholds""" + category_scores: NotRequired[Dict[str, float]] + r"""Moderation result""" + + +class ModerationObject(BaseModel): + categories: Optional[Dict[str, bool]] = None + r"""Moderation result thresholds""" + + category_scores: Optional[Dict[str, float]] = None + r"""Moderation result""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["categories", "category_scores"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/models/moderationresponse.py b/src/mistralai/client/models/moderationresponse.py similarity index 86% rename from src/mistralai/models/moderationresponse.py rename to src/mistralai/client/models/moderationresponse.py index ed13cd6b..a8a8ec3d 100644 --- a/src/mistralai/models/moderationresponse.py +++ b/src/mistralai/client/models/moderationresponse.py @@ -1,8 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 06bab279cb31 from __future__ import annotations from .moderationobject import ModerationObject, ModerationObjectTypedDict -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py new file mode 100644 index 00000000..365f062b --- /dev/null +++ b/src/mistralai/client/models/ocrimageobject.py @@ -0,0 +1,87 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 685faeb41a80 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + 
+ bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrpagedimensions.py b/src/mistralai/client/models/ocrpagedimensions.py new file mode 100644 index 00000000..847205c6 --- /dev/null +++ b/src/mistralai/client/models/ocrpagedimensions.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 02f763afbc9f + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py new file mode 100644 index 00000000..ffc7b3b6 --- /dev/null +++ b/src/mistralai/client/models/ocrpageobject.py @@ -0,0 +1,87 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 07a099f89487 + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all 
hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py new file mode 100644 index 00000000..4ad337ce --- /dev/null +++ b/src/mistralai/client/models/ocrrequest.py @@ -0,0 +1,148 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 36f204c64074 + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentUnionTypedDict = TypeAliasType( + "DocumentUnionTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], +) +r"""Document to run OCR on""" + + +DocumentUnion = TypeAliasType( + "DocumentUnion", Union[FileChunk, ImageURLChunk, DocumentURLChunk] +) +r"""Document to run OCR on""" + + +TableFormat = Literal[ + "markdown", + "html", +] + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentUnionTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: DocumentUnion + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" + + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py new file mode 100644 index 00000000..e63eed98 --- /dev/null +++ b/src/mistralai/client/models/ocrresponse.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2fdfc881ca56 + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py new file mode 100644 index 00000000..66bb050f --- /dev/null +++ b/src/mistralai/client/models/ocrtableobject.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d74dd0d2ddac + +from __future__ import annotations +from mistralai.client.types import BaseModel, UnrecognizedStr +import pydantic +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict + + +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, +] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py new file mode 100644 index 00000000..2ec1322b --- /dev/null +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 272b7e1785d5 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py new file mode 100644 index 00000000..fab7907b --- /dev/null +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9ad9741f4975 + +from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +OutputContentChunksTypedDict = TypeAliasType( + "OutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ConversationThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +OutputContentChunks = TypeAliasType( + "OutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/models/paginationinfo.py b/src/mistralai/client/models/paginationinfo.py similarity index 85% rename from src/mistralai/models/paginationinfo.py rename to src/mistralai/client/models/paginationinfo.py index 00d4f1ec..2b9dab62 100644 --- a/src/mistralai/models/paginationinfo.py +++ b/src/mistralai/client/models/paginationinfo.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48851e82d67e from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing_extensions import TypedDict diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py new file mode 100644 index 00000000..0c6f4182 --- /dev/null +++ b/src/mistralai/client/models/prediction.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1cc842a069a5 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git 
a/src/mistralai/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py similarity index 81% rename from src/mistralai/models/processingstatusout.py rename to src/mistralai/client/models/processingstatusout.py index e67bfa86..3acadcc9 100644 --- a/src/mistralai/models/processingstatusout.py +++ b/src/mistralai/client/models/processingstatusout.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3df842c4140f from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel from typing_extensions import TypedDict diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py new file mode 100644 index 00000000..c661e461 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8c2267378f48 + +from __future__ import annotations +from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionErrorTypedDict(TypedDict): + error: RealtimeTranscriptionErrorDetailTypedDict + type: Literal["error"] + + +class RealtimeTranscriptionError(BaseModel): + error: RealtimeTranscriptionErrorDetail + + type: Annotated[ + Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], + pydantic.Field(alias="type"), + ] = "error" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionError.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py new file mode 100644 index 00000000..cec1f6ea --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5bd25cdf9c7a + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +RealtimeTranscriptionErrorDetailMessageTypedDict = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessageTypedDict", Union[str, Dict[str, Any]] +) +r"""Human-readable error message.""" + + +RealtimeTranscriptionErrorDetailMessage = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessage", Union[str, Dict[str, Any]] +) +r"""Human-readable error message.""" + + +class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): + message: RealtimeTranscriptionErrorDetailMessageTypedDict + r"""Human-readable error message.""" + code: int + r"""Internal error code for debugging.""" + + +class RealtimeTranscriptionErrorDetail(BaseModel): + message: RealtimeTranscriptionErrorDetailMessage + r"""Human-readable error message.""" + + code: int + r"""Internal error code for debugging.""" diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py new file mode 100644 index 00000000..8156a270 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8b03cde6e115 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioAppendTypedDict(TypedDict): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. 
Max decoded size: 262144 bytes.""" + type: Literal["input_audio.append"] + + +class RealtimeTranscriptionInputAudioAppend(BaseModel): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.append"]], + AfterValidator(validate_const("input_audio.append")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.append" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioAppend.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py new file mode 100644 index 00000000..473eedb7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c187ba1b551d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioEndTypedDict(TypedDict): + type: Literal["input_audio.end"] + + +class RealtimeTranscriptionInputAudioEnd(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.end"]], + AfterValidator(validate_const("input_audio.end")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.end" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioEnd.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py new file mode 100644 index 00000000..553d14c7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b27b600c310e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioFlushTypedDict(TypedDict): + type: Literal["input_audio.flush"] + + +class RealtimeTranscriptionInputAudioFlush(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.flush"]], + AfterValidator(validate_const("input_audio.flush")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.flush" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioFlush.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py new file mode 100644 index 00000000..a74a457b --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 02517fa5411a + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class RealtimeTranscriptionSessionTypedDict(TypedDict): + request_id: str + model: str + audio_format: AudioFormatTypedDict + target_streaming_delay_ms: NotRequired[Nullable[int]] + + +class RealtimeTranscriptionSession(BaseModel): + request_id: str + + model: str + + audio_format: AudioFormat + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["target_streaming_delay_ms"]) + nullable_fields = set(["target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py new file mode 100644 index 00000000..bb96875a --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e3731f63a3c + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.created"] + + +class RealtimeTranscriptionSessionCreated(BaseModel): + session: RealtimeTranscriptionSession + + type: Annotated[ + Annotated[ + Optional[Literal["session.created"]], + AfterValidator(validate_const("session.created")), + ], + pydantic.Field(alias="type"), + ] = "session.created" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionCreated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py new file mode 100644 index 00000000..fea5db4a --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 686dc4f2450f + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.updated"] + + +class RealtimeTranscriptionSessionUpdated(BaseModel): + session: RealtimeTranscriptionSession + + type: Annotated[ + Annotated[ + Optional[Literal["session.updated"]], + AfterValidator(validate_const("session.updated")), + ], + pydantic.Field(alias="type"), + ] = "session.updated" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py new file mode 100644 index 00000000..07ad59a4 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e1b3fd7c5a3 + +from __future__ import annotations +from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdateMessageTypedDict(TypedDict): + session: RealtimeTranscriptionSessionUpdatePayloadTypedDict + type: Literal["session.update"] + + +class RealtimeTranscriptionSessionUpdateMessage(BaseModel): + session: RealtimeTranscriptionSessionUpdatePayload + + type: Annotated[ + Annotated[ + Optional[Literal["session.update"]], + AfterValidator(validate_const("session.update")), + ], + pydantic.Field(alias="type"), + ] = "session.update" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdateMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py new file mode 100644 index 00000000..a89441e9 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7033fdb33ad4 + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class RealtimeTranscriptionSessionUpdatePayloadTypedDict(TypedDict): + audio_format: NotRequired[Nullable[AudioFormatTypedDict]] + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + target_streaming_delay_ms: NotRequired[Nullable[int]] + r"""Set before sending audio. Streaming delay updates are rejected after audio starts.""" + + +class RealtimeTranscriptionSessionUpdatePayload(BaseModel): + audio_format: OptionalNullable[AudioFormat] = UNSET + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + r"""Set before sending audio. 
Streaming delay updates are rejected after audio starts.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["audio_format", "target_streaming_delay_ms"]) + nullable_fields = set(["audio_format", "target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py new file mode 100644 index 00000000..e0bbae4e --- /dev/null +++ b/src/mistralai/client/models/referencechunk.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 921acd3a224a + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/requestsource.py b/src/mistralai/client/models/requestsource.py new file mode 100644 index 00000000..fc4433cb --- /dev/null +++ b/src/mistralai/client/models/requestsource.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3f2774d9e609 + +from __future__ import annotations +from typing import Literal + + +RequestSource = Literal[ + "api", + "playground", + "agent_builder_v1", +] diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py new file mode 100644 index 00000000..be38fba8 --- /dev/null +++ b/src/mistralai/client/models/responsedoneevent.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf8a686bf82c + +from __future__ import annotations +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResponseDoneEventTypedDict(TypedDict): + usage: ConversationUsageInfoTypedDict + type: Literal["conversation.response.done"] + created_at: NotRequired[datetime] + + +class ResponseDoneEvent(BaseModel): + usage: ConversationUsageInfo + + type: Annotated[ + Annotated[ + Literal["conversation.response.done"], + AfterValidator(validate_const("conversation.response.done")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.done" + + created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseDoneEvent.model_rebuild() +except NameError: + pass diff 
--git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py new file mode 100644 index 00000000..fa4d0d01 --- /dev/null +++ b/src/mistralai/client/models/responseerrorevent.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b286d74e8724 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResponseErrorEventTypedDict(TypedDict): + message: str + code: int + type: Literal["conversation.response.error"] + created_at: NotRequired[datetime] + + +class ResponseErrorEvent(BaseModel): + message: str + + code: int + + type: Annotated[ + Annotated[ + Literal["conversation.response.error"], + AfterValidator(validate_const("conversation.response.error")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.error" + + created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseErrorEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py new file mode 100644 index 00000000..b2971412 --- /dev/null +++ b/src/mistralai/client/models/responseformat.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6ab8bc8d22c0 + +from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .responseformats import ResponseFormats +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: NotRequired[ResponseFormats] + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] + + +class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: Optional[ResponseFormats] = None + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py new file mode 100644 index 00000000..21345778 --- /dev/null +++ b/src/mistralai/client/models/responseformats.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4462a05fb08 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ResponseFormats = Union[ + Literal[ + "text", + "json_object", + "json_schema", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py new file mode 100644 index 00000000..84abfcd9 --- /dev/null +++ b/src/mistralai/client/models/responsestartedevent.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 24f54ee8b0f2 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResponseStartedEventTypedDict(TypedDict): + conversation_id: str + type: Literal["conversation.response.started"] + created_at: NotRequired[datetime] + + +class ResponseStartedEvent(BaseModel): + conversation_id: str + + type: Annotated[ + Annotated[ + Literal["conversation.response.started"], + AfterValidator(validate_const("conversation.response.started")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.started" + + created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 00000000..cd5955c1 --- /dev/null +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6fefa90ca351 + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to retrieve.""" + + +ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", + Union[BaseModelCardTypedDict, FTModelCardTypedDict], +) +r"""Successful Response""" + + +class UnknownResponseRetrieveModelV1ModelsModelIDGet(BaseModel): + r"""A ResponseRetrieveModelV1ModelsModelIDGet variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + +ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[BaseModelCard, FTModelCard, UnknownResponseRetrieveModelV1ModelsModelIDGet], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS, + unknown_cls=UnknownResponseRetrieveModelV1ModelsModelIDGet, + union_name="ResponseRetrieveModelV1ModelsModelIDGet", + ) + ), +] +r"""Successful Response""" diff --git a/src/mistralai/client/models/sampletype.py b/src/mistralai/client/models/sampletype.py new file mode 100644 index 00000000..dfec7cce --- /dev/null +++ b/src/mistralai/client/models/sampletype.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a9309422fed7 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +SampleType = Union[ + Literal[ + "pretrain", + "instruct", + "batch_request", + "batch_result", + "batch_error", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py new file mode 100644 index 00000000..f3b3423e --- /dev/null +++ b/src/mistralai/client/models/security.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c2ca0e2a36b7 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, SecurityMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: NotRequired[str] + + +class Security(BaseModel): + api_key: Annotated[ + Optional[str], + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ApiKey"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/shareenum.py b/src/mistralai/client/models/shareenum.py new file mode 100644 index 00000000..08ffeb7e --- /dev/null +++ b/src/mistralai/client/models/shareenum.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0e2a7a16bf8 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ShareEnum = Union[ + Literal[ + "Viewer", + "Editor", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py new file mode 100644 index 00000000..33ccd7e7 --- /dev/null +++ b/src/mistralai/client/models/sharingdelete.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f5ecce372e06 + +from __future__ import annotations +from .entitytype import EntityType +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingDeleteTypedDict(TypedDict): + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] + + +class SharingDelete(BaseModel): + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py new file mode 100644 index 00000000..7c1a52b0 --- /dev/null +++ b/src/mistralai/client/models/sharingin.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e953dda09c02 + +from __future__ import annotations +from .entitytype import EntityType +from .shareenum import ShareEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingInTypedDict(TypedDict): + level: ShareEnum + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] + + +class SharingIn(BaseModel): + level: ShareEnum + + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py new file mode 100644 index 00000000..ab3679a4 --- /dev/null +++ b/src/mistralai/client/models/sharingout.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0b8804effb5c + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingOutTypedDict(TypedDict): + library_id: str + org_id: str + role: str + share_with_type: str + share_with_uuid: Nullable[str] + user_id: NotRequired[Nullable[str]] + + +class SharingOut(BaseModel): + library_id: str + + org_id: str + + role: str + + share_with_type: str + + share_with_uuid: Nullable[str] + + user_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["user_id"]) + nullable_fields = set(["user_id", "share_with_uuid"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/source.py b/src/mistralai/client/models/source.py new file mode 100644 index 00000000..fcea403c --- /dev/null +++ b/src/mistralai/client/models/source.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fcee60a4ea0d + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +Source = Union[ + Literal[ + "upload", + "repository", + "mistral", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py new file mode 100644 index 00000000..0add960b --- /dev/null +++ b/src/mistralai/client/models/ssetypes.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1733e4765106 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +SSETypes = Union[ + Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.delta", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", + ], + UnrecognizedStr, +] +r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py new file mode 100644 index 00000000..2602cd2d --- /dev/null +++ b/src/mistralai/client/models/systemmessage.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 500ef6e85ba1 + +from __future__ import annotations +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], +) + + +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] +) + + +class SystemMessageTypedDict(TypedDict): + content: SystemMessageContentTypedDict + role: Literal["system"] + + +class SystemMessage(BaseModel): + content: SystemMessageContent + + role: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/systemmessagecontentchunks.py b/src/mistralai/client/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..d480a219 --- /dev/null +++ b/src/mistralai/client/models/systemmessagecontentchunks.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 297e8905d5af + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py new file mode 100644 index 00000000..ac9f3137 --- /dev/null +++ b/src/mistralai/client/models/textchunk.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9c96fb86a9ab + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + type: Annotated[ + Annotated[Optional[Literal["text"]], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py new file mode 100644 index 00000000..5995e601 --- /dev/null +++ b/src/mistralai/client/models/thinkchunk.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 294bfce193a4 + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ThinkChunkThinkingTypedDict = TypeAliasType( + "ThinkChunkThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +ThinkChunkThinking = TypeAliasType( + "ThinkChunkThinking", Union[ReferenceChunk, TextChunk] +) + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkChunkThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + +class ThinkChunk(BaseModel): + thinking: List[ThinkChunkThinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/timestampgranularity.py b/src/mistralai/client/models/timestampgranularity.py new file mode 100644 index 00000000..8d377375 --- /dev/null +++ b/src/mistralai/client/models/timestampgranularity.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68ddf8d702ea + +from __future__ import annotations +from typing import Literal + + +TimestampGranularity = Literal[ + "segment", + "word", +] diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py new file mode 100644 index 00000000..2b9965e5 --- /dev/null +++ b/src/mistralai/client/models/tool.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48b4f6f50fe9 + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from .tooltypes import ToolTypes +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[ToolTypes] + + +class Tool(BaseModel): + function: Function + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py new file mode 100644 index 00000000..181cec33 --- /dev/null +++ b/src/mistralai/client/models/toolcall.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fb34a1a3f3c2 + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from .tooltypes import ToolTypes +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + type: NotRequired[ToolTypes] + index: NotRequired[int] + + +class ToolCall(BaseModel): + function: FunctionCall + + id: Optional[str] = "null" + + type: Optional[ToolTypes] = None + + index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcallconfirmation.py b/src/mistralai/client/models/toolcallconfirmation.py new file mode 100644 index 00000000..fd6eca50 --- /dev/null +++ b/src/mistralai/client/models/toolcallconfirmation.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f2e953cfb4fe + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal +from typing_extensions import TypedDict + + +Confirmation = Literal[ + "allow", + "deny", +] + + +class ToolCallConfirmationTypedDict(TypedDict): + tool_call_id: str + confirmation: Confirmation + + +class ToolCallConfirmation(BaseModel): + tool_call_id: str + + confirmation: Confirmation diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py new file mode 100644 index 00000000..cb787df1 --- /dev/null +++ b/src/mistralai/client/models/toolchoice.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 14f7e4cc35b6 + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff 
--git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py new file mode 100644 index 00000000..d66c3d07 --- /dev/null +++ b/src/mistralai/client/models/toolchoiceenum.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c7798801f860 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolChoiceEnum = Union[ + Literal[ + "auto", + "none", + "any", + "required", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/toolconfiguration.py b/src/mistralai/client/models/toolconfiguration.py new file mode 100644 index 00000000..b903c8b6 --- /dev/null +++ b/src/mistralai/client/models/toolconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: faec24b75066 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ToolConfigurationTypedDict(TypedDict): + exclude: NotRequired[Nullable[List[str]]] + include: NotRequired[Nullable[List[str]]] + requires_confirmation: NotRequired[Nullable[List[str]]] + + +class ToolConfiguration(BaseModel): + exclude: OptionalNullable[List[str]] = UNSET + + include: OptionalNullable[List[str]] = UNSET + + requires_confirmation: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["exclude", "include", "requires_confirmation"]) + nullable_fields = set(["exclude", "include", "requires_confirmation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + 
val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py new file mode 100644 index 00000000..5a977ca6 --- /dev/null +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: df8f17cf3e07 + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionDeltaEventNameTypedDict = TypeAliasType( + "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDeltaEventName = TypeAliasType( + "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionDeltaEventTypedDict(TypedDict): + id: str + name: ToolExecutionDeltaEventNameTypedDict + arguments: str + type: Literal["tool.execution.delta"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class ToolExecutionDeltaEvent(BaseModel): + id: str + + name: ToolExecutionDeltaEventName + + arguments: str + + type: Annotated[ + Annotated[ + Literal["tool.execution.delta"], + AfterValidator(validate_const("tool.execution.delta")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.delta" 
+ + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDeltaEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py new file mode 100644 index 00000000..1c9b0ec9 --- /dev/null +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 514fdee7d99f + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionDoneEventNameTypedDict = TypeAliasType( + "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDoneEventName = TypeAliasType( + "ToolExecutionDoneEventName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionDoneEventTypedDict(TypedDict): + id: str + name: ToolExecutionDoneEventNameTypedDict + type: Literal["tool.execution.done"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionDoneEvent(BaseModel): + id: str + + name: 
ToolExecutionDoneEventName + + type: Annotated[ + Annotated[ + Literal["tool.execution.done"], + AfterValidator(validate_const("tool.execution.done")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "info"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py new file mode 100644 index 00000000..0d6f2a13 --- /dev/null +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -0,0 +1,115 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 76db69eebe41 + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionEntryNameTypedDict = TypeAliasType( + "ToolExecutionEntryNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionEntryName = TypeAliasType( + "ToolExecutionEntryName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionEntryTypedDict(TypedDict): + name: ToolExecutionEntryNameTypedDict + arguments: str + object: Literal["entry"] + type: Literal["tool.execution"] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + id: NotRequired[str] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionEntry(BaseModel): + name: ToolExecutionEntryName + + arguments: str + + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" + + type: Annotated[ + Annotated[ + Optional[Literal["tool.execution"]], + AfterValidator(validate_const("tool.execution")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + id: Optional[str] = None + + info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "object", + 
"type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "info", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolExecutionEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py new file mode 100644 index 00000000..21e5bfa8 --- /dev/null +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 40fadb8e49a1 + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolExecutionStartedEventNameTypedDict = TypeAliasType( + "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionStartedEventName = TypeAliasType( + "ToolExecutionStartedEventName", Union[BuiltInConnectors, str] +) + + +class ToolExecutionStartedEventTypedDict(TypedDict): + id: str + name: ToolExecutionStartedEventNameTypedDict + arguments: str + type: Literal["tool.execution.started"] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + + +class ToolExecutionStartedEvent(BaseModel): + id: str + + name: ToolExecutionStartedEventName + + arguments: str + + type: Annotated[ + Annotated[ + Literal["tool.execution.started"], + AfterValidator(validate_const("tool.execution.started")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "model", "agent_id"]) + nullable_fields = set(["model", "agent_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in 
nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolExecutionStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py new file mode 100644 index 00000000..0708b3ff --- /dev/null +++ b/src/mistralai/client/models/toolfilechunk.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26c8aadf416a + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolFileChunkToolTypedDict = TypeAliasType( + "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str]) + + +class ToolFileChunkTypedDict(TypedDict): + tool: ToolFileChunkToolTypedDict + file_id: str + type: Literal["tool_file"] + file_name: NotRequired[Nullable[str]] + file_type: NotRequired[Nullable[str]] + + +class ToolFileChunk(BaseModel): + tool: ToolFileChunkTool + + file_id: str + + type: Annotated[ + Annotated[ + Optional[Literal["tool_file"]], AfterValidator(validate_const("tool_file")) + ], + pydantic.Field(alias="type"), + ] = "tool_file" + + file_name: OptionalNullable[str] = UNSET + + file_type: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def 
serialize_model(self, handler): + optional_fields = set(["type", "file_name", "file_type"]) + nullable_fields = set(["file_name", "file_type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolFileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py new file mode 100644 index 00000000..05a0ee63 --- /dev/null +++ b/src/mistralai/client/models/toolmessage.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 15f1af161031 + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) + + +class ToolMessageTypedDict(TypedDict): + content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + + +class ToolMessage(BaseModel): + content: 
Nullable[ToolMessageContent] + + role: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py new file mode 100644 index 00000000..95454fe8 --- /dev/null +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 822e9f3e70de + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ToolReferenceChunkToolTypedDict = TypeAliasType( + "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolReferenceChunkTool = TypeAliasType( + "ToolReferenceChunkTool", Union[BuiltInConnectors, str] +) + + +class ToolReferenceChunkTypedDict(TypedDict): + tool: ToolReferenceChunkToolTypedDict + title: str + type: Literal["tool_reference"] + url: NotRequired[Nullable[str]] + favicon: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class ToolReferenceChunk(BaseModel): + tool: ToolReferenceChunkTool + + title: str + + type: Annotated[ + Annotated[ + Optional[Literal["tool_reference"]], + AfterValidator(validate_const("tool_reference")), + ], + pydantic.Field(alias="type"), + ] = "tool_reference" + + url: OptionalNullable[str] = UNSET + + favicon: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "url", "favicon", "description"]) + nullable_fields = set(["url", "favicon", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/tooltypes.py b/src/mistralai/client/models/tooltypes.py new file mode 100644 index 00000000..e601c196 --- /dev/null +++ b/src/mistralai/client/models/tooltypes.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 86c3b54272fd + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py new file mode 100644 index 00000000..2faeda8b --- /dev/null +++ b/src/mistralai/client/models/trainingfile.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2edf9bce227d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class TrainingFileTypedDict(TypedDict): + file_id: str + weight: NotRequired[float] + + +class TrainingFile(BaseModel): + file_id: str + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["weight"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py new file mode 100644 index 00000000..70315463 --- /dev/null +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60896dbc6345 + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class TranscriptionResponseTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + + +class TranscriptionResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git 
a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py new file mode 100644 index 00000000..b87bfc2f --- /dev/null +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -0,0 +1,93 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d1e6f3bdc74b + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TranscriptionSegmentChunkTypedDict(TypedDict): + text: str + start: float + end: float + type: Literal["transcription_segment"] + score: NotRequired[Nullable[float]] + speaker_id: NotRequired[Nullable[str]] + + +class TranscriptionSegmentChunk(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Annotated[ + Annotated[ + Optional[Literal["transcription_segment"]], + AfterValidator(validate_const("transcription_segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription_segment" + + score: OptionalNullable[float] = UNSET + + speaker_id: OptionalNullable[str] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "score", "speaker_id"]) 
+ nullable_fields = set(["score", "speaker_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TranscriptionSegmentChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py new file mode 100644 index 00000000..e3c50169 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 066a9158ed09 + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TranscriptionStreamDoneTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + type: Literal["transcription.done"] + + +class TranscriptionStreamDone(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" 
+ ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + type: Annotated[ + Annotated[ + Literal["transcription.done"], + AfterValidator(validate_const("transcription.done")), + ], + pydantic.Field(alias="type"), + ] = "transcription.done" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TranscriptionStreamDone.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py new file mode 100644 index 00000000..073fd99a --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b50b3d74f16f + +from __future__ import annotations +from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneTypedDict, +) +from .transcriptionstreameventtypes import TranscriptionStreamEventTypes +from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageTypedDict, +) +from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaTypedDict, +) +from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +TranscriptionStreamEventsDataTypedDict = TypeAliasType( + "TranscriptionStreamEventsDataTypedDict", + Union[ + TranscriptionStreamTextDeltaTypedDict, + TranscriptionStreamLanguageTypedDict, + TranscriptionStreamSegmentDeltaTypedDict, + TranscriptionStreamDoneTypedDict, + ], +) + + +class UnknownTranscriptionStreamEventsData(BaseModel): + r"""A TranscriptionStreamEventsData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "transcription.done": TranscriptionStreamDone, + "transcription.language": TranscriptionStreamLanguage, + "transcription.segment": TranscriptionStreamSegmentDelta, + "transcription.text.delta": TranscriptionStreamTextDelta, +} + + +TranscriptionStreamEventsData = Annotated[ + Union[ + TranscriptionStreamDone, + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, + UnknownTranscriptionStreamEventsData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownTranscriptionStreamEventsData, + union_name="TranscriptionStreamEventsData", + ) + ), +] + + +class TranscriptionStreamEventsTypedDict(TypedDict): + event: TranscriptionStreamEventTypes + data: TranscriptionStreamEventsDataTypedDict + + +class TranscriptionStreamEvents(BaseModel): + event: TranscriptionStreamEventTypes + + data: TranscriptionStreamEventsData diff --git a/src/mistralai/client/models/transcriptionstreameventtypes.py b/src/mistralai/client/models/transcriptionstreameventtypes.py new file mode 100644 index 00000000..c74bbb74 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreameventtypes.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6f71f6fbf4c5 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +TranscriptionStreamEventTypes = Union[ + Literal[ + "transcription.language", + "transcription.segment", + "transcription.text.delta", + "transcription.done", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py new file mode 100644 index 00000000..b6c61906 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e94333e4bc27 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, TypedDict + + +class TranscriptionStreamLanguageTypedDict(TypedDict): + audio_language: str + type: Literal["transcription.language"] + + +class TranscriptionStreamLanguage(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + audio_language: str + + type: Annotated[ + Annotated[ + Literal["transcription.language"], + AfterValidator(validate_const("transcription.language")), + ], + pydantic.Field(alias="type"), + ] = "transcription.language" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + +try: + TranscriptionStreamLanguage.model_rebuild() +except 
NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py new file mode 100644 index 00000000..32ef8f9b --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -0,0 +1,90 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c0a882ce57e5 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): + text: str + start: float + end: float + type: Literal["transcription.segment"] + speaker_id: NotRequired[Nullable[str]] + + +class TranscriptionStreamSegmentDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Annotated[ + Annotated[ + Literal["transcription.segment"], + AfterValidator(validate_const("transcription.segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription.segment" + + speaker_id: OptionalNullable[str] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["speaker_id"]) + nullable_fields = set(["speaker_id"]) + serialized = 
handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TranscriptionStreamSegmentDelta.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py new file mode 100644 index 00000000..42f0ffb7 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6086dc081147 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, TypedDict + + +class TranscriptionStreamTextDeltaTypedDict(TypedDict): + text: str + type: Literal["transcription.text.delta"] + + +class TranscriptionStreamTextDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + type: Annotated[ + Annotated[ + Literal["transcription.text.delta"], + AfterValidator(validate_const("transcription.text.delta")), + ], + pydantic.Field(alias="type"), + ] = "transcription.text.delta" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + 
@additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + +try: + TranscriptionStreamTextDelta.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/unarchivemodelresponse.py b/src/mistralai/client/models/unarchivemodelresponse.py new file mode 100644 index 00000000..5c75d30e --- /dev/null +++ b/src/mistralai/client/models/unarchivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 22e2ccbb0c80 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class UnarchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class UnarchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + UnarchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/updateagentrequest.py b/src/mistralai/client/models/updateagentrequest.py new file mode 100644 index 00000000..b751ff74 --- /dev/null 
+++ b/src/mistralai/client/models/updateagentrequest.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 914b4b2be67a + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import Field, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +UpdateAgentRequestToolTypedDict = TypeAliasType( + "UpdateAgentRequestToolTypedDict", + Union[ + FunctionToolTypedDict, + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +UpdateAgentRequestTool = Annotated[ + Union[ + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, + ], + Field(discriminator="type"), +] + + +class UpdateAgentRequestTypedDict(TypedDict): + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[UpdateAgentRequestToolTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the 
completion API""" + model: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + deployment_chat: NotRequired[Nullable[bool]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] + + +class UpdateAgentRequest(BaseModel): + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[UpdateAgentRequestTool]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + model: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + deployment_chat: OptionalNullable[bool] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + version_message: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + "version_message", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatedocumentrequest.py 
b/src/mistralai/client/models/updatedocumentrequest.py new file mode 100644 index 00000000..61e69655 --- /dev/null +++ b/src/mistralai/client/models/updatedocumentrequest.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a8cfda07d337 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AttributesTypedDict = TypeAliasType( + "AttributesTypedDict", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +Attributes = TypeAliasType( + "Attributes", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +class UpdateDocumentRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] + + +class UpdateDocumentRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Attributes]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "attributes"]) + nullable_fields = set(["name", "attributes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatelibraryrequest.py 
b/src/mistralai/client/models/updatelibraryrequest.py new file mode 100644 index 00000000..91cbf2a1 --- /dev/null +++ b/src/mistralai/client/models/updatelibraryrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 51bc63885337 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateLibraryRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateLibraryRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatemodelrequest.py b/src/mistralai/client/models/updatemodelrequest.py new file mode 100644 index 00000000..f685cfcc --- /dev/null +++ b/src/mistralai/client/models/updatemodelrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fe649967751e + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateModelRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateModelRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py new file mode 100644 index 00000000..31cbf07e --- /dev/null +++ b/src/mistralai/client/models/usageinfo.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 54adb9a3af16 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/usermessage.py 
b/src/mistralai/client/models/usermessage.py new file mode 100644 index 00000000..63e76792 --- /dev/null +++ b/src/mistralai/client/models/usermessage.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cb583483acf4 + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: Literal["user"] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/validationerror.py b/src/mistralai/client/models/validationerror.py new file mode 100644 index 00000000..385714c8 --- /dev/null +++ b/src/mistralai/client/models/validationerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 15df3c7368ab + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Union +from typing_extensions import TypeAliasType, TypedDict + + +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) + + +Loc = TypeAliasType("Loc", Union[str, int]) + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + + msg: str + + type: str diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py new file mode 100644 index 00000000..f0df2c77 --- /dev/null +++ b/src/mistralai/client/models/wandbintegration.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4823c1e80942 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class WandbIntegrationTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + type: Literal["wandb"] + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegration(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + + api_key: str + r"""The WandB API key to use for authentication.""" + + type: Annotated[ + Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], + pydantic.Field(alias="type"), + ] = "wandb" + + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "run_name"]) + nullable_fields = set(["name", "run_name"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WandbIntegration.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/wandbintegrationresult.py b/src/mistralai/client/models/wandbintegrationresult.py new file mode 100644 index 00000000..575cbd42 --- /dev/null +++ b/src/mistralai/client/models/wandbintegrationresult.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8787b4ad5458 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class WandbIntegrationResultTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + type: Literal["wandb"] + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + url: NotRequired[Nullable[str]] + + +class WandbIntegrationResult(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + + type: Annotated[ + Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], + pydantic.Field(alias="type"), + ] = "wandb" + + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + + run_name: OptionalNullable[str] = UNSET + + url: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "run_name", "url"]) + nullable_fields = set(["name", "run_name", "url"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WandbIntegrationResult.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py new file mode 100644 index 00000000..00d4a4b4 --- /dev/null +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bfe88af887e3 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class WebSearchPremiumToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["web_search_premium"] + + +class WebSearchPremiumTool(BaseModel): + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[ + Literal["web_search_premium"], + AfterValidator(validate_const("web_search_premium")), + ], + pydantic.Field(alias="type"), + ] = "web_search_premium" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WebSearchPremiumTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py new file mode 100644 index 00000000..6871080f --- /dev/null +++ b/src/mistralai/client/models/websearchtool.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26b0903423e5 + +from __future__ import annotations +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class WebSearchToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] + type: Literal["web_search"] + + +class WebSearchTool(BaseModel): + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ + Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))], + pydantic.Field(alias="type"), + ] = "web_search" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WebSearchTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/models_.py b/src/mistralai/client/models_.py similarity index 76% rename from src/mistralai/models_.py rename to src/mistralai/client/models_.py index b712c545..a287c413 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/client/models_.py @@ -1,10 +1,12 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1d277958a843 from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional @@ -50,6 +52,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -66,43 +69,26 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), ), request=req, - error_status_codes=["422", "4XX", "5XX"], + error_status_codes=["4XX", "5XX"], retry_config=retry_config, ) - response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModelList) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -143,6 +129,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -159,43 +146,26 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), ), request=req, - error_status_codes=["422", "4XX", "5XX"], + error_status_codes=["4XX", "5XX"], retry_config=retry_config, ) - response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModelList) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -205,7 +175,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve information about a model. @@ -243,6 +213,7 @@ def retrieve( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -259,7 +230,7 @@ def retrieve( config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -271,34 +242,22 @@ def retrieve( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + return unmarshal_json_response( + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + 
raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -308,7 +267,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve information about a model. 
@@ -346,6 +305,7 @@ async def retrieve_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -362,7 +322,7 @@ async def retrieve_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -374,34 +334,22 @@ async def retrieve_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + return unmarshal_json_response( + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - 
http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -449,6 +397,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -465,7 +414,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -477,31 +426,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteModelOut) + return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - 
http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -549,6 +487,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -565,7 +504,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -577,31 +516,20 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteModelOut) + return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, 
type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -638,7 +566,7 @@ def update( request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( + update_model_request=models.UpdateModelRequest( name=name, description=description, ), @@ -658,8 +586,13 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -676,7 +609,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -687,29 +620,17 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text 
= utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -746,7 +667,7 @@ async def update_async( request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( + update_model_request=models.UpdateModelRequest( name=name, description=description, ), @@ -766,8 +687,13 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -784,7 +710,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -795,29 +721,17 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API 
error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def archive( self, @@ -827,7 +741,7 @@ def archive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: + ) -> models.ArchiveModelResponse: r"""Archive Fine Tuned Model Archive a fine-tuned model. @@ -865,6 +779,7 @@ def archive( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -881,7 +796,7 @@ def archive( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -892,26 +807,15 @@ def archive( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ArchiveFTModelOut) + return unmarshal_json_response(models.ArchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API 
error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def archive_async( self, @@ -921,7 +825,7 @@ async def archive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: + ) -> models.ArchiveModelResponse: r"""Archive Fine Tuned Model Archive a fine-tuned model. @@ -959,6 +863,7 @@ async def archive_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -975,7 +880,7 @@ async def archive_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -986,26 +891,15 @@ async def archive_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ArchiveFTModelOut) + return unmarshal_json_response(models.ArchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, 
http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def unarchive( self, @@ -1015,7 +909,7 @@ def unarchive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: + ) -> models.UnarchiveModelResponse: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. @@ -1053,6 +947,7 @@ def unarchive( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1069,7 +964,7 @@ def unarchive( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1080,26 +975,15 @@ def unarchive( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UnarchiveFTModelOut) + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") 
- http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def unarchive_async( self, @@ -1109,7 +993,7 @@ async def unarchive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: + ) -> models.UnarchiveModelResponse: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. @@ -1147,6 +1031,7 @@ async def unarchive_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1163,7 +1048,7 @@ async def unarchive_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1174,23 +1059,12 @@ async def unarchive_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UnarchiveFTModelOut) + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = 
http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py new file mode 100644 index 00000000..a46119d1 --- /dev/null +++ b/src/mistralai/client/ocr.py @@ -0,0 +1,284 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f804a12fc62 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import Nullable, OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + r"""OCR API""" + + def process( + self, + *, + model: Nullable[str], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, 
+ timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.DocumentUnion), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to 
process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.DocumentUnion), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request_async( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/py.typed b/src/mistralai/client/py.typed similarity index 100% rename from src/mistralai/py.typed rename to src/mistralai/client/py.typed diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py new file mode 100644 index 00000000..80bf25a7 --- /dev/null +++ b/src/mistralai/client/sdk.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48edbcb38d7e + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import httpx +import importlib +from mistralai.client import models as models_, utils +from mistralai.client._hooks import SDKHooks +from mistralai.client.types import OptionalNullable, UNSET +import sys +from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import weakref + +if TYPE_CHECKING: + from mistralai.client.agents import Agents + from mistralai.client.audio import Audio + from mistralai.client.batch import Batch + from mistralai.client.beta import Beta + from mistralai.client.chat import Chat + from mistralai.client.classifiers import Classifiers + from mistralai.client.embeddings import Embeddings + from mistralai.client.files import Files + from mistralai.client.fim import Fim + from mistralai.client.fine_tuning import FineTuning + from mistralai.client.models_ import Models + from mistralai.client.ocr import Ocr + + +class Mistral(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + models: "Models" + r"""Model Management API""" + beta: "Beta" + files: "Files" + r"""Files API""" + fine_tuning: "FineTuning" + batch: "Batch" + chat: "Chat" + r"""Chat Completion API.""" + fim: "Fim" + r"""Fill-in-the-middle API.""" + agents: "Agents" + r"""Agents API.""" + embeddings: "Embeddings" + r"""Embeddings API.""" + classifiers: "Classifiers" + r"""Classifiers API.""" + ocr: "Ocr" + r"""OCR API""" + audio: "Audio" + _sub_sdk_map = { + "models": ("mistralai.client.models_", "Models"), + "beta": ("mistralai.client.beta", "Beta"), + "files": ("mistralai.client.files", "Files"), + "fine_tuning": ("mistralai.client.fine_tuning", "FineTuning"), + "batch": ("mistralai.client.batch", "Batch"), + "chat": ("mistralai.client.chat", "Chat"), + "fim": ("mistralai.client.fim", "Fim"), + "agents": ("mistralai.client.agents", "Agents"), + "embeddings": ("mistralai.client.embeddings", "Embeddings"), + "classifiers": ("mistralai.client.classifiers", "Classifiers"), + "ocr": ("mistralai.client.ocr", "Ocr"), + "audio": ("mistralai.client.audio", "Audio"), + } + + def __init__( + self, + api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + client_supplied = True + if client is None: + client = httpx.Client(follow_redirects=True) + client_supplied = False + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient(follow_redirects=True) + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ + security: Any = None + if callable(api_key): + # pylint: disable=unnecessary-lambda-assignment + security = lambda: models_.Security(api_key=api_key()) + else: + security = models_.Security(api_key=api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + for attempt in range(retries): + try: + return importlib.import_module(modname) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except 
ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/src/mistralai/client/sdkconfiguration.py b/src/mistralai/client/sdkconfiguration.py new file mode 100644 index 00000000..712e92e0 --- /dev/null +++ b/src/mistralai/client/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b7dd68a0235e + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.client import models +from mistralai.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/src/mistralai/transcriptions.py b/src/mistralai/client/transcriptions.py similarity index 84% rename from src/mistralai/transcriptions.py rename to src/mistralai/client/transcriptions.py index 24975cb2..7f01917d 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/client/transcriptions.py @@ -1,10 
+1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 75b45780c978 from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import List, Mapping, Optional, Union @@ -20,6 +22,8 @@ def complete( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -28,12 +32,14 @@ def complete( ) -> models.TranscriptionResponse: r"""Create Transcription - :param model: + :param model: ID of the model to be used. :param file: :param file_url: Url of a file to be transcribed :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -57,6 +63,8 @@ def complete( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -76,6 +84,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.AudioTranscriptionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -92,7 +101,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -103,26 +112,15 @@ def complete( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.TranscriptionResponse) + return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -133,6 +131,8 @@ async def 
complete_async( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -141,12 +141,14 @@ async def complete_async( ) -> models.TranscriptionResponse: r"""Create Transcription - :param model: + :param model: ID of the model to be used. :param file: :param file_url: Url of a file to be transcribed :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -170,6 +172,8 @@ async def complete_async( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -189,6 +193,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.AudioTranscriptionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -205,7 +210,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -216,26 +221,15 @@ async def complete_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.TranscriptionResponse) + return 
unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, @@ -246,13 +240,15 @@ def stream( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: - r"""Create streaming transcription (SSE) + r"""Create Streaming Transcription (SSE) :param model: :param file: @@ -260,6 +256,8 @@ def stream( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -283,6 +281,8 @@ def stream( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -306,6 +306,7 @@ def stream( "multipart", models.AudioTranscriptionRequestStream, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -322,7 +323,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -337,26 +338,17 @@ def stream( return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -367,13 +359,15 @@ async def stream_async( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = 
UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: - r"""Create streaming transcription (SSE) + r"""Create Streaming Transcription (SSE) :param model: :param file: @@ -381,6 +375,8 @@ async def stream_async( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -404,6 +400,8 @@ async def stream_async( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -427,6 +425,7 @@ async def stream_async( "multipart", models.AudioTranscriptionRequestStream, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -443,7 +442,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -458,23 +457,14 @@ async def stream_async( return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise errors.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/types/__init__.py b/src/mistralai/client/types/__init__.py new file mode 100644 index 00000000..cf838643 --- /dev/null +++ b/src/mistralai/client/types/__init__.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 000b943f821c + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/src/mistralai/client/types/basemodel.py b/src/mistralai/client/types/basemodel.py new file mode 100644 index 00000000..4e889aa0 --- /dev/null +++ b/src/mistralai/client/types/basemodel.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7ec465a1d3ff + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, 
_source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py new file mode 100644 index 00000000..4bde281a --- /dev/null +++ b/src/mistralai/client/utils/__init__.py @@ -0,0 +1,182 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b69505f4b269 + +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .unions import parse_open_union + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security, 
get_security_from_env + + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_default_logger", + "get_discriminator", + "parse_datetime", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "get_security_from_env", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "OpenEnumMeta", + "parse_open_union", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_const", + "validate_float", + "validate_int", + "cast_partial", +] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": 
".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "get_security_from_env": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "cast_partial": ".values", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/utils/annotations.py b/src/mistralai/client/utils/annotations.py new file mode 100644 index 00000000..4b60ab8e --- /dev/null +++ b/src/mistralai/client/utils/annotations.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1ffdedfc66a2 + +from enum import Enum +from typing import Any, Optional + + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + """ + Recursively search for the discriminator attribute in a model. + + Args: + model (Any): The model to search within. + fieldname (str): The name of the field to search for. + key (str): The key to search for in dictionaries. + + Returns: + str: The name of the discriminator attribute. + + Raises: + ValueError: If the discriminator attribute is not found. + """ + upper_fieldname = fieldname.upper() + + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" + + if isinstance(field, dict): + if key in field: + return f"{field[key]}" + + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + return None + + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None + + if isinstance(model, list): + for field in model: + discriminator = search_nested_discriminator(field) + if 
discriminator is not None: + return discriminator + + discriminator = search_nested_discriminator(model) + if discriminator is not None: + return discriminator + + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/client/utils/datetimes.py b/src/mistralai/client/utils/datetimes.py new file mode 100644 index 00000000..a2c94fac --- /dev/null +++ b/src/mistralai/client/utils/datetimes.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c40066d868c9 + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/src/mistralai/client/utils/dynamic_imports.py b/src/mistralai/client/utils/dynamic_imports.py new file mode 100644 index 00000000..969f2fc7 --- /dev/null +++ b/src/mistralai/client/utils/dynamic_imports.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ac9918d925c0 + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). + dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. + """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/client/utils/enums.py b/src/mistralai/client/utils/enums.py new file mode 100644 index 00000000..d897495f --- /dev/null +++ b/src/mistralai/client/utils/enums.py @@ -0,0 +1,135 @@ 
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0735873b5ac + +import enum +import sys +from typing import Any + +from pydantic_core import core_schema + + +class OpenEnumMeta(enum.EnumMeta): + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. + def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + [base_schema, 
core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py new file mode 100644 index 00000000..19a12152 --- /dev/null +++ b/src/mistralai/client/utils/eventstreaming.py @@ -0,0 +1,281 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3263d7502030 + +import re +import json +from dataclasses import dataclass, asdict +from typing import ( + Any, + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) +import httpx + +T = TypeVar("T") + + +class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] + response: httpx.Response + generator: Generator[T, None, None] + _closed: bool + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref + self._closed = False + + def __iter__(self): + return self + + def __next__(self): + if self._closed: + raise StopIteration + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True + self.response.close() + + +class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
+ client_ref: Optional[object] + response: httpx.Response + generator: AsyncGenerator[T, None] + _closed: bool + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref + self._closed = False + + def __aiter__(self): + return self + + async def __anext__(self): + if self._closed: + raise StopAsyncIteration + return await self.generator.__anext__() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True + await self.response.aclose() + + +@dataclass +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Any = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", + b"\r\r", + b"\n\r", + b"\n\n", +] + +UTF8_BOM = b"\xef\xbb\xbf" + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + event_id: Optional[str] = None + async for chunk in response.aiter_bytes(): + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + if discard: + await response.aclose() + return + + if position > 0: + buffer = buffer[position:] + 
position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + event_id: Optional[str] = None + for chunk in response.iter_bytes(): + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + if discard: + response.close() + return + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) + if event is not None: + yield event + + +def _parse_event( + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim == 0: + continue + + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == 
"id": + publish = True + if "\x00" not in value: + event_id = value + elif field == "retry": + if value.isdigit(): + event.retry = int(value) + publish = True + + event.id = event_id + + if sentinel and data == f"{sentinel}\n": + return None, True, event_id + + if data: + data = data[:-1] + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data + + out = None + if publish: + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py new file mode 100644 index 00000000..6facec53 --- /dev/null +++ b/src/mistralai/client/utils/forms.py @@ -0,0 +1,235 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58842e905fce + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _is_set, _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if not _is_set(obj): + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] 
= file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: + form: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if not _is_set(val): + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias else name + + if field_metadata.file: + if isinstance(val, List): + # Handle array of files + array_field_name = f_name + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties( + file_obj + ) + + if content_type is not None: + files.append( + (array_field_name, (file_name, content, content_type)) + ) + else: + files.append((array_field_name, (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) + + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) + else: + 
files.append((f_name, (file_name, content))) + elif field_metadata.json: + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if not _is_set(value): + continue + values.append(_val_to_string(value)) + + array_field_name = f_name + form[array_field_name] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if not _is_set(val): + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + if _is_set(value): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/src/mistralai/utils/headers.py b/src/mistralai/client/utils/headers.py similarity index 99% rename from src/mistralai/utils/headers.py rename to src/mistralai/client/utils/headers.py index 37864cbb..64911872 100644 --- a/src/mistralai/utils/headers.py +++ b/src/mistralai/client/utils/headers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9066de2ead8b from typing import ( Any, diff --git a/src/mistralai/utils/logger.py b/src/mistralai/client/utils/logger.py similarity index 88% rename from src/mistralai/utils/logger.py rename to src/mistralai/client/utils/logger.py index cc089307..3edad830 100644 --- a/src/mistralai/utils/logger.py +++ b/src/mistralai/client/utils/logger.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 745023607a1f import httpx import logging @@ -23,5 +24,5 @@ def get_body_content(req: httpx.Request) -> str: def get_default_logger() -> Logger: if os.getenv("MISTRAL_DEBUG"): logging.basicConfig(level=logging.DEBUG) - return logging.getLogger("mistralai") + return logging.getLogger("mistralai.client") return NoOpLogger() diff --git a/src/mistralai/utils/metadata.py b/src/mistralai/client/utils/metadata.py similarity index 99% rename from src/mistralai/utils/metadata.py rename to src/mistralai/client/utils/metadata.py index 173b3e5c..d46ffa59 100644 --- a/src/mistralai/utils/metadata.py +++ b/src/mistralai/client/utils/metadata.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d49d535ae52c from typing import Optional, Type, TypeVar, Union from dataclasses import dataclass diff --git a/src/mistralai/client/utils/queryparams.py b/src/mistralai/client/utils/queryparams.py new file mode 100644 index 00000000..0b78c548 --- /dev/null +++ b/src/mistralai/client/utils/queryparams.py @@ -0,0 +1,218 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bb77d4664844 + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) + if _is_set(gbls): + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if _is_set(query_params) else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + 
query_param_values[f_name] = [""] + continue + + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj): + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj) or not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if not _is_set(obj_param_metadata): + continue + + obj_val = getattr(obj, name) + if not _is_set(obj_val): + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif 
isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for key, val in value.items(): + if not _is_set(val): + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for val in value: + if not _is_set(val): + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/src/mistralai/client/utils/requestbodies.py b/src/mistralai/client/utils/requestbodies.py new file mode 100644 index 00000000..3aae69c7 --- /dev/null +++ b/src/mistralai/client/utils/requestbodies.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 946cfcd26ee4 + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: Optional[str] = None + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"^multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git 
a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py new file mode 100644 index 00000000..bea13041 --- /dev/null +++ b/src/mistralai/client/utils/retries.py @@ -0,0 +1,272 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f1a5b90423c + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise 
PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except (httpx.NetworkError, httpx.TimeoutException) as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def 
retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py new file mode 100644 index 00000000..d8b9d8fe --- /dev/null +++ b/src/mistralai/client/utils/security.py @@ -0,0 +1,195 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1acb7c006265 + +import base64 + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) +import os + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + 
# Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: + if security is not None: + return security + + if not issubclass(security_class, BaseModel): + raise TypeError("security_class must be a pydantic model class") + + security_dict: Any = {} + + if os.getenv("MISTRAL_API_KEY"): + security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") + + return security_class(**security_dict) if security_dict else None + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if 
metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value + elif sub_type == "custom": + return + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or 
metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/client/utils/serializers.py b/src/mistralai/client/utils/serializers.py new file mode 100644 index 00000000..fbc2772d --- /dev/null +++ b/src/mistralai/client/utils/serializers.py @@ -0,0 +1,230 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 53c57c7f29a8 + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if 
as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or 
type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. + """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. 
+ Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/src/mistralai/client/utils/unions.py b/src/mistralai/client/utils/unions.py new file mode 100644 index 00000000..14ef1bd5 --- /dev/null +++ b/src/mistralai/client/utils/unions.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d23713342634 + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. 
+ """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..624433c4 --- /dev/null +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b13585fc5626 + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.client import errors + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... 
+ + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise errors.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/src/mistralai/utils/url.py b/src/mistralai/client/utils/url.py similarity index 99% rename from src/mistralai/utils/url.py rename to src/mistralai/client/utils/url.py index c78ccbae..27a6a3a0 100644 --- a/src/mistralai/utils/url.py +++ b/src/mistralai/client/utils/url.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3c6496c17510 from decimal import Decimal from typing import ( diff --git a/src/mistralai/utils/values.py b/src/mistralai/client/utils/values.py similarity index 99% rename from src/mistralai/utils/values.py rename to src/mistralai/client/utils/values.py index dae01a44..2469a9f3 100644 --- a/src/mistralai/utils/values.py +++ b/src/mistralai/client/utils/values.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bb6ade7a7f82 from datetime import datetime from enum import Enum diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py deleted file mode 100644 index 009df94d..00000000 --- a/src/mistralai/conversations.py +++ /dev/null @@ -1,2680 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, List, Mapping, Optional, Union - -# region imports -import typing -from typing import AsyncGenerator -import logging -from collections import defaultdict - -from mistralai.models import ( - ResponseStartedEvent, - ConversationEventsData, - InputEntries, -) -from mistralai.extra.run.result import ( - RunResult, - RunResultEvents, - FunctionResultEvent, - reconstitue_entries, -) -from mistralai.extra.run.utils import run_requirements - -logger = logging.getLogger(__name__) - -if typing.TYPE_CHECKING: - from mistralai.extra.run.context import RunContext - -# endregion imports - - -class Conversations(BaseSDK): - r"""(beta) Conversations API""" - - # region sdk-class-body - # Custom run code allowing client side execution of code - - @run_requirements - async def run_async( - self, - run_ctx: "RunContext", - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> RunResult: - """Run a conversation with the given inputs and context. 
- - The execution of a run will only stop when no required local execution can be done.""" - from mistralai.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls - - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) - - while True: - if run_ctx.conversation_id is None: - res = await self.start_async( - inputs=input_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, - ) - run_result.conversation_id = res.conversation_id - run_ctx.conversation_id = res.conversation_id - logger.info( - f"Started Run with conversation with id {res.conversation_id}" - ) - else: - res = await self.append_async( - conversation_id=run_ctx.conversation_id, - inputs=input_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - run_ctx.request_count += 1 - run_result.output_entries.extend(res.outputs) - fcalls = get_function_calls(res.outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - input_entries = typing.cast(list[InputEntries], fresults) - return run_result - - @run_requirements - async def run_stream_async( - self, - run_ctx: "RunContext", - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: 
OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: - """Similar to `run_async` but returns a generator which streams events. - - The last streamed object is the RunResult object which summarises what happened in the run.""" - from mistralai.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls - - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) - - async def run_generator() -> ( - AsyncGenerator[Union[RunResultEvents, RunResult], None] - ): - current_entries = input_entries - while True: - received_event_tracker: defaultdict[ - int, list[ConversationEventsData] - ] = defaultdict(list) - if run_ctx.conversation_id is None: - res = await self.start_stream_async( - inputs=current_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, - ) - else: - res = await self.append_stream_async( - conversation_id=run_ctx.conversation_id, - inputs=current_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - async for event in res: - if ( - isinstance(event.data, ResponseStartedEvent) - and run_ctx.conversation_id is None - ): - run_result.conversation_id = event.data.conversation_id - run_ctx.conversation_id = event.data.conversation_id - logger.info( - f"Started Run with conversation with id {run_ctx.conversation_id}" - ) - if ( - output_index := getattr(event.data, "output_index", None) - ) is not None: - received_event_tracker[output_index].append(event.data) - yield typing.cast(RunResultEvents, event) - run_ctx.request_count 
+= 1 - outputs = reconstitue_entries(received_event_tracker) - run_result.output_entries.extend(outputs) - fcalls = get_function_calls(outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - for fresult in fresults: - yield RunResultEvents( - event="function.result", - data=FunctionResultEvent( - type="function.result", - result=fresult.result, - tool_call_id=fresult.tool_call_id, - ), - ) - current_entries = typing.cast(list[InputEntries], fresults) - yield run_result - - return run_generator() - - # endregion sdk-class-body - - def start( - self, - *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = False, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. 
- - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: - :param completion_args: - :param name: - :param description: - :param agent_id: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - agent_id=agent_id, - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - 
retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def start_async( - self, - *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = False, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, 
models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: - :param completion_args: - :param name: - :param description: - :param agent_id: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - agent_id=agent_id, - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", 
"application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: - r"""List all created conversations. - - Retrieve a list of conversation entities sorted by creation time. - - :param page: - :param page_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsListRequest( - page=page, - page_size=page_size, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: - r"""List all created conversations. - - Retrieve a list of conversation entities sorted by creation time. - - :param page: - :param page_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsListRequest( - page=page, - page_size=page_size, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - 
) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def get( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: - r"""Retrieve a conversation information. - - Given a conversation_id retrieve a conversation entity with its attributes. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsGetRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def get_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: - r"""Retrieve a conversation information. - - Given a conversation_id retrieve a conversation entity with its attributes. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsGetRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error 
occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def append( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationAppendRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendRequest( - conversation_id=conversation_id, - conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_request, - False, - False, - "json", - models.ConversationAppendRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return 
utils.unmarshal_json(http_res.text, models.ConversationResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def append_async( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationAppendRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. 
- :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendRequest( - conversation_id=conversation_id, - conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_request, - False, - False, - "json", - models.ConversationAppendRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - 
hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def get_history( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationHistory: - r"""Retrieve all entries in a conversation. - - Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. 
- - :param conversation_id: ID of the conversation from which we are fetching entries. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsHistoryRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}/history", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, 
models.ConversationHistory) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def get_history_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationHistory: - r"""Retrieve all entries in a conversation. - - Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - - :param conversation_id: ID of the conversation from which we are fetching entries. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsHistoryRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}/history", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationHistory) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def get_messages( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationMessages: - r"""Retrieve all messages in a conversation. - - Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - - :param conversation_id: ID of the conversation from which we are fetching messages. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsMessagesRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}/messages", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationMessages) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, 
http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def get_messages_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationMessages: - r"""Retrieve all messages in a conversation. - - Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - - :param conversation_id: ID of the conversation from which we are fetching messages. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsMessagesRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}/messages", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationMessages) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def restart( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - from_entry_id: str, - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationRestartRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. 
- :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartRequest( - conversation_id=conversation_id, - conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - from_entry_id=from_entry_id, - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}/restart", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_request, - False, - False, - "json", - models.ConversationRestartRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = 
self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def restart_async( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - from_entry_id: str, - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationRestartRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: 
Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartRequest( - conversation_id=conversation_id, - conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - from_entry_id=from_entry_id, - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}/restart", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - 
security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_request, - False, - False, - "json", - models.ConversationRestartRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def 
start_stream( - self, - *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = True, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models.ConversationStreamRequestHandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[ - List[models.ConversationStreamRequestTools], - List[models.ConversationStreamRequestToolsTypedDict], - ] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: - :param completion_args: - :param name: - :param description: - :param agent_id: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model( - tools, OptionalNullable[List[models.ConversationStreamRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - agent_id=agent_id, - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/conversations#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any 
= None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def start_stream_async( - self, - *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = True, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models.ConversationStreamRequestHandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[ - List[models.ConversationStreamRequestTools], - List[models.ConversationStreamRequestToolsTypedDict], - ] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = 
None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: - :param completion_args: - :param name: - :param description: - :param agent_id: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model( - tools, OptionalNullable[List[models.ConversationStreamRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - agent_id=agent_id, - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - 
user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def append_stream( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationAppendStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendStreamRequest( - conversation_id=conversation_id, - conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_stream_request, - False, - False, - "json", - models.ConversationAppendStreamRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if 
utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def append_stream_async( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationAppendStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. 
- - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendStreamRequest( - conversation_id=conversation_id, - conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_stream_request, - False, - False, - "json", - models.ConversationAppendStreamRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = 
self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def restart_stream( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - from_entry_id: str, - stream: Optional[bool] = True, - store: 
Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationRestartStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartStreamRequest( - conversation_id=conversation_id, - conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - from_entry_id=from_entry_id, - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}/restart#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_stream_request, - False, - False, - "json", - models.ConversationRestartStreamRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - 
) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def restart_stream_async( - self, - *, - conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - from_entry_id: str, - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models.ConversationRestartStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. 
A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartStreamRequest( - conversation_id=conversation_id, - conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - from_entry_id=from_entry_id, - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}/restart#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_stream_request, - False, - False, - "json", - 
models.ConversationRestartStreamRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/embeddings.py 
b/src/mistralai/embeddings.py deleted file mode 100644 index ef0699d1..00000000 --- a/src/mistralai/embeddings.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import Any, Mapping, Optional, Union - - -class Embeddings(BaseSDK): - r"""Embeddings API.""" - - def create( - self, - *, - model: str, - inputs: Union[ - models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict - ], - output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models.EmbeddingDtype] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.EmbeddingResponse: - r"""Embeddings - - Embeddings - - :param model: ID of the model to use. - :param inputs: Text to embed. - :param output_dimension: The dimension of the output embeddings. - :param output_dtype: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.EmbeddingRequest( - model=model, - inputs=inputs, - output_dimension=output_dimension, - output_dtype=output_dtype, - ) - - req = self._build_request( - method="POST", - path="/v1/embeddings", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.EmbeddingRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def create_async( - self, - *, - model: str, - inputs: Union[ - models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict - ], - output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models.EmbeddingDtype] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.EmbeddingResponse: - r"""Embeddings - - Embeddings - - :param model: ID of the model to use. - :param inputs: Text to embed. - :param output_dimension: The dimension of the output embeddings. - :param output_dtype: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.EmbeddingRequest( - model=model, - inputs=inputs, - output_dimension=output_dimension, - output_dtype=output_dtype, - ) - - req = self._build_request_async( - method="POST", - path="/v1/embeddings", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.EmbeddingRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): 
- http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/extra/README.md b/src/mistralai/extra/README.md index dfce43b3..0593d84a 100644 --- a/src/mistralai/extra/README.md +++ b/src/mistralai/extra/README.md @@ -34,7 +34,7 @@ class Chat(BaseSDK): 3. Now build the SDK with the custom code: ```bash -rm -rf dist; poetry build; python3 -m pip install ~/client-python/dist/mistralai-1.4.1-py3-none-any.whl --force-reinstall +rm -rf dist; uv build; uv pip install --reinstall ~/client-python/dist/mistralai-1.4.1-py3-none-any.whl ``` 4. 
And now you should be able to call the custom method: diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py index d9a81d24..cabda728 100644 --- a/src/mistralai/extra/__init__.py +++ b/src/mistralai/extra/__init__.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + from .struct_chat import ( ParsedChatCompletionResponse, convert_to_parsed_chat_completion_response, @@ -5,9 +7,55 @@ from .utils import response_format_from_pydantic_model from .utils.response_format import CustomPydanticModel +if TYPE_CHECKING: + from .realtime import ( + AudioEncoding, + AudioFormat, + RealtimeConnection, + RealtimeTranscriptionError, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + RealtimeTranscription, + UnknownRealtimeEvent, + ) + +_REALTIME_EXPORTS = { + "RealtimeTranscription", + "RealtimeConnection", + "AudioEncoding", + "AudioFormat", + "UnknownRealtimeEvent", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", +} + + +def __getattr__(name: str): + if name in _REALTIME_EXPORTS: + from . 
import realtime + + return getattr(realtime, name) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + __all__ = [ "convert_to_parsed_chat_completion_response", "response_format_from_pydantic_model", "CustomPydanticModel", "ParsedChatCompletionResponse", + "RealtimeTranscription", + "RealtimeConnection", + "AudioEncoding", + "AudioFormat", + "UnknownRealtimeEvent", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", ] diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py index 7853ddc2..d2cd3e79 100644 --- a/src/mistralai/extra/exceptions.py +++ b/src/mistralai/extra/exceptions.py @@ -1,14 +1,59 @@ +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.client.models import RealtimeTranscriptionError + + class MistralClientException(Exception): - """Base exception for all the client errors.""" + """Base exception for client errors.""" class RunException(MistralClientException): - """Exception raised for errors during a conversation run.""" + """Conversation run errors.""" class MCPException(MistralClientException): - """Exception raised for errors related to MCP operations.""" + """MCP operation errors.""" class MCPAuthException(MCPException): - """Exception raised for authentication errors with an MCP server.""" + """MCP authentication errors.""" + + +class RealtimeTranscriptionException(MistralClientException): + """Base realtime transcription exception.""" + + def __init__( + self, + message: str, + *, + code: Optional[int] = None, + payload: Optional[object] = None, + ) -> None: + super().__init__(message) + self.code = code + self.payload = payload + + +class RealtimeTranscriptionWSError(RealtimeTranscriptionException): + def __init__( + self, + message: str, + *, + payload: Optional["RealtimeTranscriptionError"] = None, + raw: Optional[object] = 
None, + ) -> None: + code: Optional[int] = None + if payload is not None: + try: + maybe_code = getattr(payload.error, "code", None) + if isinstance(maybe_code, int): + code = maybe_code + except Exception: + code = None + + super().__init__( + message, code=code, payload=payload if payload is not None else raw + ) + self.payload_typed = payload + self.payload_raw = raw diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py index 909f5d4a..8a61ddab 100644 --- a/src/mistralai/extra/mcp/auth.py +++ b/src/mistralai/extra/mcp/auth.py @@ -1,11 +1,10 @@ -from typing import Optional +import logging -from authlib.oauth2.rfc8414 import AuthorizationServerMetadata -from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase import httpx -import logging +from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase +from authlib.oauth2.rfc8414 import AuthorizationServerMetadata -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel logger = logging.getLogger(__name__) @@ -16,8 +15,8 @@ class Oauth2AuthorizationScheme(BaseModel): authorization_url: str token_url: str scope: list[str] - description: Optional[str] = None - refresh_url: Optional[str] = None + description: str | None = None + refresh_url: str | None = None class OAuthParams(BaseModel): @@ -42,7 +41,7 @@ def from_oauth_params(cls, oauth_params: OAuthParams) -> "AsyncOAuth2Client": async def get_well_known_authorization_server_metadata( server_url: str, -) -> Optional[AuthorizationServerMetadata]: +) -> AuthorizationServerMetadata | None: """Fetch the metadata from the well-known location. 
This should be available on MCP servers as described by the specification: @@ -123,10 +122,10 @@ async def dynamic_client_registration( async def build_oauth_params( server_url: str, redirect_url: str, - client_id: Optional[str] = None, - client_secret: Optional[str] = None, - scope: Optional[list[str]] = None, - async_client: Optional[httpx.AsyncClient] = None, + client_id: str | None = None, + client_secret: str | None = None, + scope: list[str] | None = None, + async_client: httpx.AsyncClient | None = None, ) -> OAuthParams: """Get issuer metadata and build the oauth required params.""" metadata = await get_oauth_server_metadata(server_url=server_url) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index 8be5585c..115eff61 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -1,14 +1,17 @@ -from typing import Optional, Union import logging import typing +from collections.abc import Sequence from contextlib import AsyncExitStack -from typing import Protocol, Any +from typing import Any, Protocol -from mcp import ClientSession -from mcp.types import ListPromptsResult, EmbeddedResource, ImageContent, TextContent +from mcp import ClientSession # pyright: ignore[reportMissingImports] +from mcp.types import ( # pyright: ignore[reportMissingImports] + ContentBlock, + ListPromptsResult, +) from mistralai.extra.exceptions import MCPException -from mistralai.models import ( +from mistralai.client.models import ( FunctionTool, Function, SystemMessageTypedDict, @@ -20,8 +23,8 @@ class MCPSystemPrompt(typing.TypedDict): - description: Optional[str] - messages: list[Union[SystemMessageTypedDict, AssistantMessageTypedDict]] + description: str | None + messages: list[SystemMessageTypedDict | AssistantMessageTypedDict] class MCPClientProtocol(Protocol): @@ -29,7 +32,7 @@ class MCPClientProtocol(Protocol): _name: str - async def initialize(self, exit_stack: Optional[AsyncExitStack]) -> None: + async def 
initialize(self, exit_stack: AsyncExitStack | None) -> None: ... async def aclose(self) -> None: @@ -39,7 +42,7 @@ async def get_tools(self) -> list[FunctionTool]: ... async def execute_tool( - self, name: str, arguments: dict + self, name: str, arguments: dict[str, Any] ) -> list[TextChunkTypedDict]: ... @@ -57,20 +60,18 @@ class MCPClientBase(MCPClientProtocol): _session: ClientSession - def __init__(self, name: Optional[str] = None): + def __init__(self, name: str | None = None): self._name = name or self.__class__.__name__ - self._exit_stack: Optional[AsyncExitStack] = None + self._exit_stack: AsyncExitStack | None = None self._is_initialized = False - def _convert_content( - self, mcp_content: Union[TextContent, ImageContent, EmbeddedResource] - ) -> TextChunkTypedDict: + def _convert_content(self, mcp_content: ContentBlock) -> TextChunkTypedDict: if not mcp_content.type == "text": raise MCPException("Only supporting text tool responses for now.") return {"type": "text", "text": mcp_content.text} def _convert_content_list( - self, mcp_contents: list[Union[TextContent, ImageContent, EmbeddedResource]] + self, mcp_contents: Sequence[ContentBlock] ) -> list[TextChunkTypedDict]: content_chunks = [] for mcp_content in mcp_contents: @@ -83,7 +84,6 @@ async def get_tools(self) -> list[FunctionTool]: for mcp_tool in mcp_tools.tools: tools.append( FunctionTool( - type="function", function=Function( name=mcp_tool.name, description=mcp_tool.description, @@ -108,7 +108,7 @@ async def get_system_prompt( "description": prompt_result.description, "messages": [ typing.cast( - Union[SystemMessageTypedDict, AssistantMessageTypedDict], + SystemMessageTypedDict | AssistantMessageTypedDict, { "role": message.role, "content": self._convert_content(mcp_content=message.content), @@ -121,7 +121,7 @@ async def get_system_prompt( async def list_system_prompts(self) -> ListPromptsResult: return await self._session.list_prompts() - async def initialize(self, exit_stack: 
Optional[AsyncExitStack] = None) -> None: + async def initialize(self, exit_stack: AsyncExitStack | None = None) -> None: """Initialize the MCP session.""" # client is already initialized so return if self._is_initialized: diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py index 2dfe7a2d..b4929c54 100644 --- a/src/mistralai/extra/mcp/sse.py +++ b/src/mistralai/extra/mcp/sse.py @@ -1,24 +1,22 @@ import http import logging -import typing -from typing import Any, Optional from contextlib import AsyncExitStack from functools import cached_property +from typing import Any import httpx +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from authlib.oauth2.rfc6749 import OAuth2Token +from mcp.client.sse import sse_client # pyright: ignore[reportMissingImports] +from mcp.shared.message import SessionMessage # pyright: ignore[reportMissingImports] from mistralai.extra.exceptions import MCPAuthException from mistralai.extra.mcp.base import ( MCPClientBase, ) from mistralai.extra.mcp.auth import OAuthParams, AsyncOAuth2Client -from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream - -from mcp.client.sse import sse_client -from mcp.shared.message import SessionMessage -from authlib.oauth2.rfc6749 import OAuth2Token -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel logger = logging.getLogger(__name__) @@ -27,7 +25,7 @@ class SSEServerParams(BaseModel): """Parameters required for a MCPClient with SSE transport""" url: str - headers: Optional[dict[str, Any]] = None + headers: dict[str, Any] | None = None timeout: float = 5 sse_read_timeout: float = 60 * 5 @@ -41,20 +39,20 @@ class MCPClientSSE(MCPClientBase): This is possibly going to change in the future since the protocol has ongoing discussions. 
""" - _oauth_params: Optional[OAuthParams] + _oauth_params: OAuthParams | None _sse_params: SSEServerParams def __init__( self, sse_params: SSEServerParams, - name: Optional[str] = None, - oauth_params: Optional[OAuthParams] = None, - auth_token: Optional[OAuth2Token] = None, + name: str | None = None, + oauth_params: OAuthParams | None = None, + auth_token: OAuth2Token | None = None, ): super().__init__(name=name) self._sse_params = sse_params - self._oauth_params: Optional[OAuthParams] = oauth_params - self._auth_token: Optional[OAuth2Token] = auth_token + self._oauth_params: OAuthParams | None = oauth_params + self._auth_token: OAuth2Token | None = auth_token @cached_property def base_url(self) -> str: @@ -142,7 +140,7 @@ async def requires_auth(self) -> bool: async def _get_transport( self, exit_stack: AsyncExitStack ) -> tuple[ - MemoryObjectReceiveStream[typing.Union[SessionMessage, Exception]], + MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage], ]: try: diff --git a/src/mistralai/extra/mcp/stdio.py b/src/mistralai/extra/mcp/stdio.py index 28c3b8c5..a548066c 100644 --- a/src/mistralai/extra/mcp/stdio.py +++ b/src/mistralai/extra/mcp/stdio.py @@ -1,12 +1,9 @@ -from typing import Optional import logging from contextlib import AsyncExitStack -from mistralai.extra.mcp.base import ( - MCPClientBase, -) +from mcp import StdioServerParameters, stdio_client # pyright: ignore[reportMissingImports] -from mcp import stdio_client, StdioServerParameters +from mistralai.extra.mcp.base import MCPClientBase logger = logging.getLogger(__name__) @@ -14,7 +11,9 @@ class MCPClientSTDIO(MCPClientBase): """MCP client that uses stdio for communication.""" - def __init__(self, stdio_params: StdioServerParameters, name: Optional[str] = None): + def __init__( + self, stdio_params: StdioServerParameters, name: str | None = None + ): super().__init__(name=name) self._stdio_params = stdio_params diff --git 
a/src/mistralai/extra/observability/__init__.py b/src/mistralai/extra/observability/__init__.py new file mode 100644 index 00000000..4ff5873c --- /dev/null +++ b/src/mistralai/extra/observability/__init__.py @@ -0,0 +1,15 @@ +from contextlib import contextmanager + +from opentelemetry import trace as otel_trace + +from .otel import MISTRAL_SDK_OTEL_TRACER_NAME + + +@contextmanager +def trace(name: str, **kwargs): + tracer = otel_trace.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + with tracer.start_as_current_span(name, **kwargs) as span: + yield span + + +__all__ = ["trace"] diff --git a/src/mistralai/extra/observability/otel.py b/src/mistralai/extra/observability/otel.py new file mode 100644 index 00000000..4a8808ce --- /dev/null +++ b/src/mistralai/extra/observability/otel.py @@ -0,0 +1,372 @@ +import copy +import json +import logging +import os +import traceback +from datetime import datetime, timezone +from enum import Enum + +import httpx +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes +import opentelemetry.semconv._incubating.attributes.http_attributes as http_attributes +import opentelemetry.semconv.attributes.server_attributes as server_attributes +from opentelemetry import propagate, trace +from opentelemetry.sdk.trace import SpanProcessor +from opentelemetry.trace import Span, Status, StatusCode, Tracer, set_span_in_context + +logger = logging.getLogger(__name__) + + +OTEL_SERVICE_NAME: str = "mistralai_sdk" +MISTRAL_SDK_OTEL_TRACER_NAME: str = OTEL_SERVICE_NAME + "_tracer" + +MISTRAL_SDK_DEBUG_TRACING: bool = os.getenv("MISTRAL_SDK_DEBUG_TRACING", "false").lower() == "true" +DEBUG_HINT: str = "To see detailed tracing logs, set MISTRAL_SDK_DEBUG_TRACING=true." 
+ + +class MistralAIAttributes: + MISTRAL_AI_TOTAL_TOKENS = "mistral_ai.request.total_tokens" + MISTRAL_AI_TOOL_CALL_ARGUMENTS = "mistral_ai.tool.call.arguments" + MISTRAL_AI_MESSAGE_ID = "mistral_ai.message.id" + MISTRAL_AI_OPERATION_NAME= "mistral_ai.operation.name" + MISTRAL_AI_OCR_USAGE_PAGES_PROCESSED = "mistral_ai.ocr.usage.pages_processed" + MISTRAL_AI_OCR_USAGE_DOC_SIZE_BYTES = "mistral_ai.ocr.usage.doc_size_bytes" + MISTRAL_AI_OPERATION_ID = "mistral_ai.operation.id" + MISTRAL_AI_ERROR_TYPE = "mistral_ai.error.type" + MISTRAL_AI_ERROR_MESSAGE = "mistral_ai.error.message" + MISTRAL_AI_ERROR_CODE = "mistral_ai.error.code" + MISTRAL_AI_FUNCTION_CALL_ARGUMENTS = "mistral_ai.function.call.arguments" + +class MistralAINameValues(Enum): + OCR = "ocr" + +class TracingErrors(Exception, Enum): + FAILED_TO_CREATE_SPAN_FOR_REQUEST = "Failed to create span for request." + FAILED_TO_ENRICH_SPAN_WITH_RESPONSE = "Failed to enrich span with response." + FAILED_TO_HANDLE_ERROR_IN_SPAN = "Failed to handle error in span." + FAILED_TO_END_SPAN = "Failed to end span." + + def __str__(self): + return str(self.value) + +class GenAISpanEnum(str, Enum): + CONVERSATION = "conversation" + CONV_REQUEST = "POST /v1/conversations" + EXECUTE_TOOL = "execute_tool" + VALIDATE_RUN = "validate_run" + + @staticmethod + def function_call(func_name: str): + return f"function_call[{func_name}]" + + +def parse_time_to_nanos(ts: str) -> int: + dt = datetime.fromisoformat(ts.replace("Z", "+00:00")).astimezone(timezone.utc) + return int(dt.timestamp() * 1e9) + +def set_available_attributes(span: Span, attributes: dict) -> None: + for attribute, value in attributes.items(): + if value: + span.set_attribute(attribute, value) + + +def enrich_span_from_request(span: Span, request: httpx.Request) -> Span: + if not request.url.port: + # From httpx doc: + # Note that the URL class performs port normalization as per the WHATWG spec. 
+ # Default ports for "http", "https", "ws", "wss", and "ftp" schemes are always treated as None. + # Handling default ports since most of the time we are using https + if request.url.scheme == "https": + port = 443 + elif request.url.scheme == "http": + port = 80 + else: + port = -1 + else: + port = request.url.port + + span.set_attributes({ + http_attributes.HTTP_REQUEST_METHOD: request.method, + http_attributes.HTTP_URL: str(request.url), + server_attributes.SERVER_ADDRESS: request.headers.get("host", ""), + server_attributes.SERVER_PORT: port + }) + if request._content: + request_body = json.loads(request._content) + + attributes = { + gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT: request_body.get("n", None), + gen_ai_attributes.GEN_AI_REQUEST_ENCODING_FORMATS: request_body.get("encoding_formats", None), + gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: request_body.get("frequency_penalty", None), + gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS: request_body.get("max_tokens", None), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: request_body.get("model", None), + gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY: request_body.get("presence_penalty", None), + gen_ai_attributes.GEN_AI_REQUEST_SEED: request_body.get("random_seed", None), + gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES: request_body.get("stop", None), + gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE: request_body.get("temperature", None), + gen_ai_attributes.GEN_AI_REQUEST_TOP_P: request_body.get("top_p", None), + gen_ai_attributes.GEN_AI_REQUEST_TOP_K: request_body.get("top_k", None), + # Input messages are likely to be large, containing user/PII data and other sensitive information. + # Also structured attributes are not yet supported on spans in Python. + # For those reasons, we will not record the input messages for now. + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: None, + } + # Set attributes only if they are not None. 
+ # From OpenTelemetry documentation: None is not a valid attribute value per spec / is not a permitted value type for an attribute. + set_available_attributes(span, attributes) + return span + + +def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: str, response: httpx.Response) -> None: + span.set_status(Status(StatusCode.OK)) + response_data = json.loads(response.content) + + # Base attributes + attributes: dict[str, str | int] = { + http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, + MistralAIAttributes.MISTRAL_AI_OPERATION_ID: operation_id, + gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value + } + + # Add usage attributes if available + usage = response_data.get("usage", {}) + if usage: + attributes.update({ + gen_ai_attributes.GEN_AI_USAGE_PROMPT_TOKENS: usage.get("prompt_tokens", 0), + gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS: usage.get("completion_tokens", 0), + MistralAIAttributes.MISTRAL_AI_TOTAL_TOKENS: usage.get("total_tokens", 0) + }) + + span.set_attributes(attributes) + if operation_id == "agents_api_v1_agents_create": + # Semantics from https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-agent-spans/#create-agent-span + agent_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CREATE_AGENT.value, + gen_ai_attributes.GEN_AI_AGENT_DESCRIPTION: response_data.get("description", ""), + gen_ai_attributes.GEN_AI_AGENT_ID: response_data.get("id", ""), + gen_ai_attributes.GEN_AI_AGENT_NAME: response_data.get("name", ""), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: response_data.get("model", ""), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: response_data.get("instructions", "") + } + span.set_attributes(agent_attributes) + if operation_id in ["agents_api_v1_conversations_start", "agents_api_v1_conversations_append"]: + outputs = response_data.get("outputs", []) + 
conversation_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.INVOKE_AGENT.value, + gen_ai_attributes.GEN_AI_CONVERSATION_ID: response_data.get("conversation_id", "") + } + span.set_attributes(conversation_attributes) + parent_context = set_span_in_context(span) + + for output in outputs: + # TODO: Only enrich the spans if it's a single turn conversation. + # Multi turn conversations are handled in the extra.run.tools.create_function_result function + if output["type"] == "function.call": + pass + if output["type"] == "tool.execution": + start_ns = parse_time_to_nanos(output["created_at"]) + end_ns = parse_time_to_nanos(output["completed_at"]) + child_span = tracer.start_span("Tool Execution", start_time=start_ns, context=parent_context) + child_span.set_attributes({"agent.trace.public": ""}) + tool_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value, + gen_ai_attributes.GEN_AI_TOOL_CALL_ID: output.get("id", ""), + MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: output.get("arguments", ""), + gen_ai_attributes.GEN_AI_TOOL_NAME: output.get("name", "") + } + child_span.set_attributes(tool_attributes) + child_span.end(end_time=end_ns) + if output["type"] == "message.output": + start_ns = parse_time_to_nanos(output["created_at"]) + end_ns = parse_time_to_nanos(output["completed_at"]) + child_span = tracer.start_span("Message Output", start_time=start_ns, context=parent_context) + child_span.set_attributes({"agent.trace.public": ""}) + message_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CHAT.value, + gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value, + MistralAIAttributes.MISTRAL_AI_MESSAGE_ID: output.get("id", ""), + gen_ai_attributes.GEN_AI_AGENT_ID: output.get("agent_id", ""), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: output.get("model", "") 
+ } + child_span.set_attributes(message_attributes) + child_span.end(end_time=end_ns) + if operation_id == "ocr_v1_ocr_post": + usage_info = response_data.get("usage_info", "") + ocr_attributes = { + MistralAIAttributes.MISTRAL_AI_OPERATION_NAME: MistralAINameValues.OCR.value, + MistralAIAttributes.MISTRAL_AI_OCR_USAGE_PAGES_PROCESSED: usage_info.get("pages_processed", "") if usage_info else "", + MistralAIAttributes.MISTRAL_AI_OCR_USAGE_DOC_SIZE_BYTES: usage_info.get("doc_size_bytes", "") if usage_info else "", + gen_ai_attributes.GEN_AI_REQUEST_MODEL: response_data.get("model", "") + } + span.set_attributes(ocr_attributes) + + +class GenAISpanProcessor(SpanProcessor): + def on_start(self, span, parent_context = None): + span.set_attributes({"agent.trace.public": ""}) + + +def get_or_create_otel_tracer() -> tuple[bool, Tracer]: + """ + Get a tracer from the current TracerProvider. + + The SDK does not set up its own TracerProvider - it relies on the application + to configure OpenTelemetry. This follows OTEL best practices where: + - Libraries/SDKs get tracers from the global provider + - Applications configure the TracerProvider + + If no TracerProvider is configured, the ProxyTracerProvider (default) will + return a NoOp tracer, effectively disabling tracing. Once the application + sets up a real TracerProvider, subsequent spans will be recorded. 
+ + Returns: + Tuple[bool, Tracer]: (tracing_enabled, tracer) + - tracing_enabled is True if a real TracerProvider is configured + - tracer is always valid (may be NoOp if no provider configured) + """ + tracer_provider = trace.get_tracer_provider() + tracer = tracer_provider.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + + # Tracing is considered enabled if we have a real TracerProvider (not the default proxy) + tracing_enabled = not isinstance(tracer_provider, trace.ProxyTracerProvider) + + return tracing_enabled, tracer + +def get_traced_request_and_span( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + request: httpx.Request, +) -> tuple[httpx.Request, Span | None]: + if not tracing_enabled: + return request, span + + try: + span = tracer.start_span(name=operation_id) + span.set_attributes({"agent.trace.public": ""}) + # Inject the span context into the request headers to be used by the backend service to continue the trace + propagate.inject(request.headers, context=set_span_in_context(span)) + span = enrich_span_from_request(span, request) + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_CREATE_SPAN_FOR_REQUEST, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + if span: + end_span(span=span) + span = None + + return request, span + + +def get_traced_response( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + response: httpx.Response, +) -> httpx.Response: + if not tracing_enabled or not span: + return response + try: + is_stream_response = not response.is_closed and not response.is_stream_consumed + if is_stream_response: + return TracedResponse.from_response(resp=response, span=span) + enrich_span_from_response( + tracer, span, operation_id, response + ) + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_ENRICH_SPAN_WITH_RESPONSE, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + if 
span: + end_span(span=span) + return response + +def get_response_and_error( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + response: httpx.Response, + error: Exception | None, +) -> tuple[httpx.Response, Exception | None]: + if not tracing_enabled or not span: + return response, error + try: + if error: + span.record_exception(error) + span.set_status(Status(StatusCode.ERROR, str(error))) + if hasattr(response, "_content") and response._content: + response_body = json.loads(response._content) + if response_body.get("object", "") == "error": + if error_msg := response_body.get("message", ""): + attributes = { + http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, + MistralAIAttributes.MISTRAL_AI_ERROR_TYPE: response_body.get("type", ""), + MistralAIAttributes.MISTRAL_AI_ERROR_MESSAGE: error_msg, + MistralAIAttributes.MISTRAL_AI_ERROR_CODE: response_body.get("code", ""), + } + for attribute, value in attributes.items(): + if value: + span.set_attribute(attribute, value) + span.end() + span = None + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_HANDLE_ERROR_IN_SPAN, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + + if span: + span.end() + span = None + return response, error + + +def end_span(span: Span) -> None: + try: + span.end() + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_END_SPAN, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + +class TracedResponse(httpx.Response): + """ + TracedResponse is a subclass of httpx.Response that ends the span when the response is closed. + + This hack allows ending the span only once the stream is fully consumed. 
+ """ + def __init__(self, *args, span: Span | None, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.span = span + + def close(self) -> None: + if self.span: + end_span(span=self.span) + super().close() + + async def aclose(self) -> None: + if self.span: + end_span(span=self.span) + await super().aclose() + + @classmethod + def from_response(cls, resp: httpx.Response, span: Span | None) -> "TracedResponse": + traced_resp = cls.__new__(cls) + traced_resp.__dict__ = copy.copy(resp.__dict__) + traced_resp.span = span + + # Warning: this syntax bypasses the __init__ method. + # If you add init logic in the TracedResponse.__init__ method, you will need to add the following line for it to execute: + # traced_resp.__init__(your_arguments) + + return traced_resp diff --git a/src/mistralai/extra/realtime/__init__.py b/src/mistralai/extra/realtime/__init__.py new file mode 100644 index 00000000..7b80e045 --- /dev/null +++ b/src/mistralai/extra/realtime/__init__.py @@ -0,0 +1,25 @@ +from mistralai.client.models import ( + AudioEncoding, + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, +) + +from .connection import UnknownRealtimeEvent, RealtimeConnection +from .transcription import RealtimeTranscription + +__all__ = [ + "AudioEncoding", + "AudioFormat", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", + "RealtimeConnection", + "RealtimeTranscription", + "UnknownRealtimeEvent", +] diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py new file mode 100644 index 00000000..6547052b --- /dev/null +++ b/src/mistralai/extra/realtime/connection.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import base64 +import json +from asyncio import 
CancelledError +from collections import deque +from typing import Any, AsyncIterator, Deque, Optional, Union + +from pydantic import ValidationError, BaseModel + +try: + from websockets.asyncio.client import ClientConnection # websockets >= 13.0 +except ImportError as exc: + raise ImportError( + "The `websockets` package (>=13.0) is required for real-time transcription. " + "Install with: pip install 'mistralai[realtime]'" + ) from exc + +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioFlush, + RealtimeTranscriptionError, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdatePayload, + TranscriptionStreamDone, + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, +) +from mistralai.client.types import UNSET + + +class UnknownRealtimeEvent(BaseModel): + """ + Forward-compat fallback event: + - unknown message type + - invalid JSON payload + - schema validation failure + """ + + type: Optional[str] + content: Any + error: Optional[str] = None + + +RealtimeEvent = Union[ + # session lifecycle + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + # server errors + RealtimeTranscriptionError, + # transcription events + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, + TranscriptionStreamDone, + # forward-compat fallback + UnknownRealtimeEvent, +] + +_MESSAGE_MODELS: dict[str, Any] = { + "session.created": RealtimeTranscriptionSessionCreated, + "session.updated": RealtimeTranscriptionSessionUpdated, + "error": RealtimeTranscriptionError, + "transcription.language": TranscriptionStreamLanguage, + "transcription.segment": TranscriptionStreamSegmentDelta, + "transcription.text.delta": 
TranscriptionStreamTextDelta, + "transcription.done": TranscriptionStreamDone, +} + + +def parse_realtime_event(payload: Any) -> RealtimeEvent: + """ + Tolerant parser: + - unknown event type -> UnknownRealtimeEvent + - validation failures -> UnknownRealtimeEvent (includes error string) + - invalid payload -> UnknownRealtimeEvent + """ + if not isinstance(payload, dict): + return UnknownRealtimeEvent( + type=None, content=payload, error="expected JSON object" + ) + + msg_type = payload.get("type") + if not isinstance(msg_type, str): + return UnknownRealtimeEvent( + type=None, content=payload, error="missing/invalid 'type'" + ) + + model_cls = _MESSAGE_MODELS.get(msg_type) + if model_cls is None: + return UnknownRealtimeEvent( + type=msg_type, content=payload, error="unknown event type" + ) + try: + parsed = model_cls.model_validate(payload) + return parsed + except ValidationError as exc: + return UnknownRealtimeEvent(type=msg_type, content=payload, error=str(exc)) + + +class RealtimeConnection: + def __init__( + self, + websocket: ClientConnection, + session: RealtimeTranscriptionSession, + *, + initial_events: Optional[list[RealtimeEvent]] = None, + ) -> None: + self._websocket = websocket + self._session = session + self._closed = False + self._initial_events: Deque[RealtimeEvent] = deque(initial_events or []) + + @property + def request_id(self) -> str: + return self._session.request_id + + @property + def session(self) -> RealtimeTranscriptionSession: + return self._session + + @property + def audio_format(self) -> AudioFormat: + return self._session.audio_format + + @property + def is_closed(self) -> bool: + return self._closed + + async def send_audio( + self, audio_bytes: Union[bytes, bytearray, memoryview] + ) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + message = RealtimeTranscriptionInputAudioAppend( + audio=base64.b64encode(bytes(audio_bytes)).decode("ascii") + ) + await self._websocket.send(message.model_dump_json()) + + 
async def flush_audio(self) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + await self._websocket.send( + RealtimeTranscriptionInputAudioFlush().model_dump_json() + ) + + async def update_session( + self, + audio_format: Optional[AudioFormat] = None, + *, + target_streaming_delay_ms: Optional[int] = None, + ) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + if audio_format is None and target_streaming_delay_ms is None: + raise ValueError("At least one session field must be provided") + + message = RealtimeTranscriptionSessionUpdateMessage( + session=RealtimeTranscriptionSessionUpdatePayload( + audio_format=audio_format if audio_format is not None else UNSET, + target_streaming_delay_ms=target_streaming_delay_ms + if target_streaming_delay_ms is not None + else UNSET, + ) + ) + await self._websocket.send(message.model_dump_json()) + + async def end_audio(self) -> None: + if self._closed: + return + await self._websocket.send( + RealtimeTranscriptionInputAudioEnd().model_dump_json() + ) + + async def close(self, *, code: int = 1000, reason: str = "") -> None: + if self._closed: + return + self._closed = True + await self._websocket.close(code=code, reason=reason) + + async def __aenter__(self) -> "RealtimeConnection": + return self + + async def __aexit__(self, exc_type, exc, tb) -> None: + await self.close() + + def __aiter__(self) -> AsyncIterator[RealtimeEvent]: + return self.events() + + async def events(self) -> AsyncIterator[RealtimeEvent]: + # replay any handshake/prelude events (including session.created) + while self._initial_events: + ev = self._initial_events.popleft() + self._apply_session_updates(ev) + yield ev + + try: + async for msg in self._websocket: + text = ( + msg.decode("utf-8", errors="replace") + if isinstance(msg, (bytes, bytearray)) + else msg + ) + try: + data = json.loads(text) + except Exception as exc: + yield UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: 
{exc}" + ) + continue + + ev = parse_realtime_event(data) + self._apply_session_updates(ev) + yield ev + except CancelledError: + pass + finally: + await self.close() + + def _apply_session_updates(self, ev: RealtimeEvent) -> None: + if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance( + ev, RealtimeTranscriptionSessionUpdated + ): + self._session = ev.session diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py new file mode 100644 index 00000000..b216e676 --- /dev/null +++ b/src/mistralai/extra/realtime/transcription.py @@ -0,0 +1,278 @@ +from __future__ import annotations + +import asyncio +import json +import time +from typing import AsyncIterator, Mapping, Optional +from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse + +try: + from websockets.asyncio.client import ( + ClientConnection, + connect, + ) # websockets >= 13.0 +except ImportError as exc: + raise ImportError( + "The `websockets` package (>=13.0) is required for real-time transcription. 
" + "Install with: pip install 'mistralai[realtime]'" + ) from exc + +from mistralai.client import models, utils +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, +) +from mistralai.client.sdkconfiguration import SDKConfiguration +from mistralai.client.utils import generate_url, get_security, get_security_from_env + +from ..exceptions import RealtimeTranscriptionException, RealtimeTranscriptionWSError +from .connection import ( + RealtimeConnection, + RealtimeEvent, + UnknownRealtimeEvent, + parse_realtime_event, +) + + +class RealtimeTranscription: + """Client for realtime transcription over WebSocket (websockets >= 13.0).""" + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self._sdk_config = sdk_config + + def _build_url( + self, + model: str, + *, + server_url: Optional[str], + query_params: Mapping[str, str], + ) -> str: + if server_url is not None: + base_url = utils.remove_suffix(server_url, "/") + else: + base_url, _ = self._sdk_config.get_server_details() + + url = generate_url(base_url, "/v1/audio/transcriptions/realtime", None) + + parsed = urlparse(url) + merged = dict(parse_qsl(parsed.query, keep_blank_values=True)) + merged["model"] = model + merged.update(dict(query_params)) + + return urlunparse(parsed._replace(query=urlencode(merged))) + + async def connect( + self, + model: str, + audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RealtimeConnection: + if timeout_ms is None: + timeout_ms = self._sdk_config.timeout_ms + + security = self._sdk_config.security + if security is not None and callable(security): + security = security() + + resolved_security = get_security_from_env(security, models.Security) + + headers: dict[str, str] = {} + query_params: dict[str, 
str] = {} + + if resolved_security is not None: + security_headers, security_query = get_security(resolved_security) + headers |= security_headers + for key, values in security_query.items(): + if values: + query_params[key] = values[-1] + + if http_headers is not None: + headers |= dict(http_headers) + + url = self._build_url(model, server_url=server_url, query_params=query_params) + + parsed = urlparse(url) + if parsed.scheme == "https": + parsed = parsed._replace(scheme="wss") + elif parsed.scheme == "http": + parsed = parsed._replace(scheme="ws") + ws_url = urlunparse(parsed) + open_timeout = None if timeout_ms is None else timeout_ms / 1000.0 + user_agent = self._sdk_config.user_agent + + websocket: Optional[ClientConnection] = None + try: + websocket = await connect( + ws_url, + additional_headers=dict(headers), + open_timeout=open_timeout, + user_agent_header=user_agent, + ) + + session, initial_events = await _recv_handshake( + websocket, timeout_ms=timeout_ms + ) + connection = RealtimeConnection( + websocket=websocket, + session=session, + initial_events=initial_events, + ) + + if audio_format is not None or target_streaming_delay_ms is not None: + await connection.update_session( + audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, + ) + + return connection + + except RealtimeTranscriptionException: + if websocket is not None: + await websocket.close() + raise + except Exception as exc: + if websocket is not None: + await websocket.close() + raise RealtimeTranscriptionException(f"Failed to connect: {exc}") from exc + + async def transcribe_stream( + self, + audio_stream: AsyncIterator[bytes], + model: str, + audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncIterator[RealtimeEvent]: + """ + Flow + - opens connection + - streams audio in background + - yields 
events from the connection + """ + async with await self.connect( + model=model, + audio_format=audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, + server_url=server_url, + timeout_ms=timeout_ms, + http_headers=http_headers, + ) as connection: + + async def _send() -> None: + async for chunk in audio_stream: + if connection.is_closed: + break + await connection.send_audio(chunk) + await connection.flush_audio() + await connection.end_audio() + + send_task = asyncio.create_task(_send()) + + try: + async for event in connection: + yield event + + # stop early (caller still sees the terminating event) + if isinstance(event, RealtimeTranscriptionError): + break + if getattr(event, "type", None) == "transcription.done": + break + finally: + send_task.cancel() + try: + await send_task + except asyncio.CancelledError: + pass + await connection.close() + + +def _extract_error_message(payload: dict) -> str: + err = payload.get("error") + if isinstance(err, dict): + msg = err.get("message") + if isinstance(msg, str): + return msg + if isinstance(msg, dict): + detail = msg.get("detail") + if isinstance(detail, str): + return detail + return "Realtime transcription error" + + +async def _recv_handshake( + websocket: ClientConnection, + *, + timeout_ms: Optional[int], +) -> tuple[RealtimeTranscriptionSession, list[RealtimeEvent]]: + """ + Read messages until session.created or error. + Replay all messages read during handshake as initial events (lossless). 
+ """ + timeout_s = None if timeout_ms is None else timeout_ms / 1000.0 + deadline = None if timeout_s is None else (time.monotonic() + timeout_s) + + initial_events: list[RealtimeEvent] = [] + + def remaining() -> Optional[float]: + if deadline is None: + return None + return max(0.0, deadline - time.monotonic()) + + try: + while True: + raw = await asyncio.wait_for(websocket.recv(), timeout=remaining()) + text = ( + raw.decode("utf-8", errors="replace") + if isinstance(raw, (bytes, bytearray)) + else raw + ) + + try: + payload = json.loads(text) + except Exception as exc: + initial_events.append( + UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: {exc}" + ) + ) + continue + + msg_type = payload.get("type") if isinstance(payload, dict) else None + if msg_type == "error" and isinstance(payload, dict): + parsed = parse_realtime_event(payload) + initial_events.append(parsed) + if isinstance(parsed, RealtimeTranscriptionError): + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=parsed, + raw=payload, + ) + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=None, + raw=payload, + ) + + event = parse_realtime_event(payload) + initial_events.append(event) + + if isinstance(event, RealtimeTranscriptionSessionCreated): + return event.session, initial_events + + except asyncio.TimeoutError as exc: + raise RealtimeTranscriptionException( + "Timeout waiting for session creation." 
+ ) from exc + except RealtimeTranscriptionException: + raise + except Exception as exc: + raise RealtimeTranscriptionException( + f"Unexpected websocket handshake failure: {exc}" + ) from exc diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 08350a84..7ade705f 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -1,50 +1,46 @@ import asyncio import inspect import typing -from contextlib import AsyncExitStack -from functools import wraps from collections.abc import Callable - +from contextlib import AsyncExitStack from dataclasses import dataclass, field -from typing import Union, Optional +from functools import wraps +from logging import getLogger import pydantic -from mistralai.extra import ( - response_format_from_pydantic_model, -) +from mistralai.extra import response_format_from_pydantic_model from mistralai.extra.exceptions import RunException from mistralai.extra.mcp.base import MCPClientProtocol from mistralai.extra.run.result import RunResult -from mistralai.types.basemodel import OptionalNullable, BaseModel, UNSET -from mistralai.models import ( - ResponseFormat, - FunctionCallEntry, - Tools, - ToolsTypedDict, +from mistralai.extra.run.tools import ( + RunCoroutine, + RunFunction, + RunMCPTool, + RunTool, + create_function_result, + create_tool_call, +) +from mistralai.client.models import ( CompletionArgs, CompletionArgsTypedDict, - FunctionResultEntry, ConversationInputs, ConversationInputsTypedDict, + ConversationRequestTool, + ConversationRequestToolTypedDict, + FunctionCallEntry, + FunctionResultEntry, FunctionTool, - MessageInputEntry, InputEntries, + MessageInputEntry, + ResponseFormat, + UnknownAgentTool, + UpdateAgentRequestTool, ) - -from logging import getLogger - -from mistralai.extra.run.tools import ( - create_function_result, - RunFunction, - create_tool_call, - RunTool, - RunMCPTool, - RunCoroutine, -) +from mistralai.client.types.basemodel import BaseModel, 
OptionalNullable, UNSET if typing.TYPE_CHECKING: - from mistralai import Beta, OptionalNullable + from mistralai.client import Beta, OptionalNullable logger = getLogger(__name__) @@ -56,8 +52,8 @@ class AgentRequestKwargs(typing.TypedDict): class ModelRequestKwargs(typing.TypedDict): model: str instructions: OptionalNullable[str] - tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] - completion_args: OptionalNullable[Union[CompletionArgs, CompletionArgsTypedDict]] + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] @dataclass @@ -72,7 +68,7 @@ class RunContext: passed if the user wants to continue an existing conversation. model (Options[str]): The model name to be used for the conversation. Can't be used along with 'agent_id'. agent_id (Options[str]): The agent id to be used for the conversation. Can't be used along with 'model'. - output_format (Optional[type[BaseModel]]): The output format expected from the conversation. It represents + output_format (type[BaseModel] | None): The output format expected from the conversation. It represents the `response_format` which is part of the `CompletionArgs`. request_count (int): The number of requests made in the current `RunContext`. 
continue_on_fn_error (bool): Flag to determine if the conversation should continue when function execution @@ -83,10 +79,10 @@ class RunContext: _callable_tools: dict[str, RunTool] = field(init=False, default_factory=dict) _mcp_clients: list[MCPClientProtocol] = field(init=False, default_factory=list) - conversation_id: Optional[str] = field(default=None) - model: Optional[str] = field(default=None) - agent_id: Optional[str] = field(default=None) - output_format: Optional[type[BaseModel]] = field(default=None) + conversation_id: str | None = field(default=None) + model: str | None = field(default=None) + agent_id: str | None = field(default=None) + output_format: type[BaseModel] | None = field(default=None) request_count: int = field(default=0) continue_on_fn_error: bool = field(default=False) @@ -192,10 +188,12 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs ) agent = await beta_client.agents.get_async(agent_id=self.agent_id) agent_tools = agent.tools or [] - updated_tools = [] - for i in range(len(agent_tools)): - tool = agent_tools[i] - if tool.type != "function": + updated_tools: list[UpdateAgentRequestTool] = [] + for tool in agent_tools: + if isinstance(tool, UnknownAgentTool): + # Skip unknown tools - can't include them in update request + continue + if not isinstance(tool, FunctionTool): updated_tools.append(tool) elif tool.function.name in self._callable_tools: # function already exists in the agent, don't add it again @@ -215,10 +213,8 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs async def prepare_model_request( self, - tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, - completion_args: OptionalNullable[ - Union[CompletionArgs, CompletionArgsTypedDict] - ] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, 
instructions: OptionalNullable[str] = None, ) -> ModelRequestKwargs: if self.model is None: @@ -233,7 +229,7 @@ async def prepare_model_request( request_tools = [] if isinstance(tools, list): for tool in tools: - request_tools.append(typing.cast(Tools, tool)) + request_tools.append(typing.cast(ConversationRequestTool, tool)) for tool in self.get_tools(): request_tools.append(tool) return ModelRequestKwargs( @@ -254,14 +250,12 @@ async def _validate_run( *, beta_client: "Beta", run_ctx: RunContext, - inputs: Union[ConversationInputs, ConversationInputsTypedDict], + inputs: ConversationInputs | ConversationInputsTypedDict, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, - completion_args: OptionalNullable[ - Union[CompletionArgs, CompletionArgsTypedDict] - ] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, ) -> tuple[ - Union[AgentRequestKwargs, ModelRequestKwargs], RunResult, list[InputEntries] + AgentRequestKwargs | ModelRequestKwargs, RunResult, list[InputEntries] ]: input_entries: list[InputEntries] = [] if isinstance(inputs, str): @@ -277,7 +271,7 @@ async def _validate_run( output_model=run_ctx.output_format, conversation_id=run_ctx.conversation_id, ) - req: Union[AgentRequestKwargs, ModelRequestKwargs] + req: AgentRequestKwargs | ModelRequestKwargs if run_ctx.agent_id: if tools or completion_args: raise RunException("Can't set tools or completion_args when using an agent") diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py index 9592dccf..6e2bcc8a 100644 --- a/src/mistralai/extra/run/result.py +++ b/src/mistralai/extra/run/result.py @@ -1,12 +1,13 @@ import datetime import json import typing -from typing import Union, Annotated, Optional, Literal from dataclasses import dataclass, field -from pydantic 
import Discriminator, Tag, BaseModel +from typing import Annotated, Literal + +from pydantic import BaseModel, Discriminator, Tag from mistralai.extra.utils.response_format import pydantic_model_from_json -from mistralai.models import ( +from mistralai.client.models import ( FunctionResultEntry, FunctionCallEntry, MessageOutputEntry, @@ -33,17 +34,17 @@ ToolReferenceChunk, FunctionCallEntryArguments, ) -from mistralai.utils import get_discriminator - -RunOutputEntries = typing.Union[ - MessageOutputEntry, - FunctionCallEntry, - FunctionResultEntry, - AgentHandoffEntry, - ToolExecutionEntry, -] +from mistralai.client.utils import get_discriminator + +RunOutputEntries = ( + MessageOutputEntry + | FunctionCallEntry + | FunctionResultEntry + | AgentHandoffEntry + | ToolExecutionEntry +) -RunEntries = typing.Union[RunOutputEntries, MessageInputEntry] +RunEntries = RunOutputEntries | MessageInputEntry def as_text(entry: RunOutputEntries) -> str: @@ -140,12 +141,12 @@ class RunFiles: @dataclass class RunResult: input_entries: list[InputEntries] - conversation_id: Optional[str] = field(default=None) + conversation_id: str | None = field(default=None) output_entries: list[RunOutputEntries] = field(default_factory=list) files: dict[str, RunFiles] = field(default_factory=dict) - output_model: Optional[type[BaseModel]] = field(default=None) + output_model: type[BaseModel] | None = field(default=None) - def get_file(self, file_id: str) -> Optional[RunFiles]: + def get_file(self, file_id: str) -> RunFiles | None: return self.files.get(file_id) @property @@ -172,36 +173,34 @@ def output_as_model(self) -> BaseModel: class FunctionResultEvent(BaseModel): - id: Optional[str] = None + id: str | None = None - type: Optional[Literal["function.result"]] = "function.result" + type: Literal["function.result"] | None = "function.result" result: str tool_call_id: str - created_at: Optional[datetime.datetime] = datetime.datetime.now( + created_at: datetime.datetime | None = 
datetime.datetime.now( tz=datetime.timezone.utc ) - output_index: Optional[int] = 0 + output_index: int | None = 0 -RunResultEventsType = typing.Union[SSETypes, Literal["function.result"]] +RunResultEventsType = SSETypes | Literal["function.result"] RunResultEventsData = typing.Annotated[ - Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, Tag("conversation.response.error")], - Annotated[ResponseStartedEvent, Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], - Annotated[FunctionResultEvent, Tag("function.result")], - ], + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")] + | Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")] + | Annotated[ResponseDoneEvent, Tag("conversation.response.done")] + | Annotated[ResponseErrorEvent, Tag("conversation.response.error")] + | Annotated[ResponseStartedEvent, Tag("conversation.response.started")] + | Annotated[FunctionCallEvent, Tag("function.call.delta")] + | Annotated[MessageOutputEvent, Tag("message.output.delta")] + | Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")] + | Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")] + | Annotated[FunctionResultEvent, Tag("function.result")], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index 81fec665..18c1d3dd 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -1,13 +1,11 @@ +import inspect import itertools +import json import logging from dataclasses import dataclass 
-import inspect - -from pydantic import Field, create_model -from pydantic.fields import FieldInfo -import json -from typing import cast, Callable, Sequence, Any, ForwardRef, get_type_hints, Union +from typing import Any, Callable, ForwardRef, Sequence, cast, get_type_hints +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes from griffe import ( Docstring, DocstringSectionKind, @@ -15,11 +13,15 @@ DocstringParameter, DocstringSection, ) +from opentelemetry import trace +from pydantic import Field, create_model +from pydantic.fields import FieldInfo from mistralai.extra.exceptions import RunException from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.observability.otel import GenAISpanEnum, MistralAIAttributes, set_available_attributes from mistralai.extra.run.result import RunOutputEntries -from mistralai.models import ( +from mistralai.client.models import ( FunctionResultEntry, FunctionTool, Function, @@ -51,7 +53,7 @@ class RunMCPTool: mcp_client: MCPClientProtocol -RunTool = Union[RunFunction, RunCoroutine, RunMCPTool] +RunTool = RunFunction | RunCoroutine | RunMCPTool def _get_function_description(docstring_sections: list[DocstringSection]) -> str: @@ -166,7 +168,6 @@ def create_tool_call(func: Callable) -> FunctionTool: type_hints = get_type_hints(func, include_extras=True, localns=None, globalns=None) return FunctionTool( - type="function", function=Function( name=name, description=_get_function_description(docstring_sections), @@ -191,22 +192,31 @@ async def create_function_result( if isinstance(function_call.arguments, str) else function_call.arguments ) - try: - if isinstance(run_tool, RunFunction): - res = run_tool.callable(**arguments) - elif isinstance(run_tool, RunCoroutine): - res = await run_tool.awaitable(**arguments) - elif isinstance(run_tool, RunMCPTool): - res = await run_tool.mcp_client.execute_tool(function_call.name, arguments) - except Exception as e: - if 
continue_on_fn_error is True: - return FunctionResultEntry( - tool_call_id=function_call.tool_call_id, - result=f"Error while executing {function_call.name}: {str(e)}", - ) - raise RunException( - f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'" - ) from e + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span(GenAISpanEnum.function_call(function_call.name)) as span: + try: + if isinstance(run_tool, RunFunction): + res = run_tool.callable(**arguments) + elif isinstance(run_tool, RunCoroutine): + res = await run_tool.awaitable(**arguments) + elif isinstance(run_tool, RunMCPTool): + res = await run_tool.mcp_client.execute_tool(function_call.name, arguments) + function_call_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value, + gen_ai_attributes.GEN_AI_TOOL_CALL_ID: function_call.id, + MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: str(function_call.arguments), + gen_ai_attributes.GEN_AI_TOOL_NAME: function_call.name + } + set_available_attributes(span, function_call_attributes) + except Exception as e: + if continue_on_fn_error is True: + return FunctionResultEntry( + tool_call_id=function_call.tool_call_id, + result=f"Error while executing {function_call.name}: {str(e)}", + ) + raise RunException( + f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'" + ) from e return FunctionResultEntry( tool_call_id=function_call.tool_call_id, diff --git a/src/mistralai/extra/struct_chat.py b/src/mistralai/extra/struct_chat.py index 364b450f..d3fd3f5a 100644 --- a/src/mistralai/extra/struct_chat.py +++ b/src/mistralai/extra/struct_chat.py @@ -1,19 +1,26 @@ -from ..models import ChatCompletionResponse, ChatCompletionChoice, AssistantMessage -from .utils.response_format import CustomPydanticModel, pydantic_model_from_json -from typing import List, Optional, Type, Generic -from pydantic import BaseModel import json 
+from typing import Generic + +from mistralai.client.models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse +from .utils.response_format import CustomPydanticModel, pydantic_model_from_json + class ParsedAssistantMessage(AssistantMessage, Generic[CustomPydanticModel]): - parsed: Optional[CustomPydanticModel] + parsed: CustomPydanticModel | None + class ParsedChatCompletionChoice(ChatCompletionChoice, Generic[CustomPydanticModel]): - message: Optional[ParsedAssistantMessage[CustomPydanticModel]] # type: ignore + message: ParsedAssistantMessage[CustomPydanticModel] | None # type: ignore + class ParsedChatCompletionResponse(ChatCompletionResponse, Generic[CustomPydanticModel]): - choices: Optional[List[ParsedChatCompletionChoice[CustomPydanticModel]]] # type: ignore + choices: list[ParsedChatCompletionChoice[CustomPydanticModel]] | None # type: ignore + -def convert_to_parsed_chat_completion_response(response: ChatCompletionResponse, response_format: Type[BaseModel]) -> ParsedChatCompletionResponse: +def convert_to_parsed_chat_completion_response( + response: ChatCompletionResponse, + response_format: type[CustomPydanticModel], +) -> ParsedChatCompletionResponse[CustomPydanticModel]: parsed_choices = [] if response.choices: diff --git a/src/mistralai/extra/tests/test_struct_chat.py b/src/mistralai/extra/tests/test_struct_chat.py index dd529ba5..7b79bf77 100644 --- a/src/mistralai/extra/tests/test_struct_chat.py +++ b/src/mistralai/extra/tests/test_struct_chat.py @@ -5,7 +5,7 @@ ParsedChatCompletionChoice, ParsedAssistantMessage, ) -from ...models import ( +from mistralai.client.models import ( ChatCompletionResponse, UsageInfo, ChatCompletionChoice, diff --git a/src/mistralai/extra/tests/test_utils.py b/src/mistralai/extra/tests/test_utils.py index 41fa53e3..35523fbd 100644 --- a/src/mistralai/extra/tests/test_utils.py +++ b/src/mistralai/extra/tests/test_utils.py @@ -5,8 +5,8 @@ ) from pydantic import BaseModel, ValidationError -from ...models 
import ResponseFormat, JSONSchema -from ...types.basemodel import Unset +from mistralai.client.models import ResponseFormat, JSONSchema +from mistralai.client.types.basemodel import Unset import unittest diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py index 67e15912..2378b562 100644 --- a/src/mistralai/extra/utils/response_format.py +++ b/src/mistralai/extra/utils/response_format.py @@ -1,13 +1,14 @@ +from typing import Any, TypeVar + from pydantic import BaseModel -from typing import TypeVar, Any, Type, Dict -from ...models import JSONSchema, ResponseFormat +from mistralai.client.models import JSONSchema, ResponseFormat from ._pydantic_helper import rec_strict_json_schema CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel) def response_format_from_pydantic_model( - model: Type[CustomPydanticModel], + model: type[CustomPydanticModel], ) -> ResponseFormat: """Generate a strict JSON schema from a pydantic model.""" model_schema = rec_strict_json_schema(model.model_json_schema()) @@ -18,7 +19,8 @@ def response_format_from_pydantic_model( def pydantic_model_from_json( - json_data: Dict[str, Any], pydantic_model: Type[CustomPydanticModel] + json_data: dict[str, Any], + pydantic_model: type[CustomPydanticModel], ) -> CustomPydanticModel: """Parse a JSON schema into a pydantic model.""" return pydantic_model.model_validate(json_data) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py deleted file mode 100644 index c57bc68e..00000000 --- a/src/mistralai/fim.py +++ /dev/null @@ -1,564 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, Mapping, Optional, Union - - -class Fim(BaseSDK): - r"""Fill-in-the-middle API.""" - - def complete( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.FIMCompletionRequestStop, - models.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FIMCompletionResponse: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
- :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/v1/fim/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData 
- ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def complete_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models.FIMCompletionRequestStop, - models.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FIMCompletionResponse: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fim/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, 
models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def stream( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models.FIMCompletionStreamRequestStop, - models.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/v1/fim/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text 
= utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def stream_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models.FIMCompletionStreamRequestStop, - models.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fim/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - 
retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py deleted file mode 100644 index ce3d1389..00000000 --- a/src/mistralai/fine_tuning.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.jobs import Jobs - - -class FineTuning(BaseSDK): - jobs: Jobs - - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.jobs = Jobs(self.sdk_configuration) diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py deleted file mode 100644 index 47b052cb..00000000 --- a/src/mistralai/httpclient.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -# pyright: reportReturnType = false -import asyncio -from typing_extensions import Protocol, runtime_checkable -import httpx -from typing import Any, Optional, Union - - -@runtime_checkable -class HttpClient(Protocol): - def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - def close(self) -> None: - pass - - -@runtime_checkable -class AsyncHttpClient(Protocol): - async def send( - self, - 
request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - async def aclose(self) -> None: - pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. 
- owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - try: - loop = asyncio.get_running_loop() - asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) - except RuntimeError: - try: - asyncio.run(async_client.aclose()) - except RuntimeError: - # best effort - pass diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py deleted file mode 100644 index 020c40f0..00000000 --- a/src/mistralai/jobs.py +++ /dev/null @@ -1,1137 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from datetime import datetime -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import List, Mapping, Optional, Union - - -class Jobs(BaseSDK): - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_before: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[models.QueryParamStatus] = UNSET, - wandb_project: OptionalNullable[str] = UNSET, - wandb_name: OptionalNullable[str] = UNSET, - suffix: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: - r"""Get Fine Tuning Jobs - - Get a list of fine-tuning jobs for your organization and user. - - :param page: The page number of the results to be returned. - :param page_size: The number of items to return per page. 
- :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. - :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. - :param created_before: - :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. - :param status: The current job state to filter on. When set, the other results are not displayed. - :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. - :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. - :param suffix: The model suffix to filter on. When set, the other results are not displayed. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( - page=page, - page_size=page_size, - model=model, - created_after=created_after, - created_before=created_before, - created_by_me=created_by_me, - status=status, - wandb_project=wandb_project, - wandb_name=wandb_name, - suffix=suffix, - ) - - req = self._build_request( - method="GET", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.JobsOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_before: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[models.QueryParamStatus] = UNSET, - wandb_project: OptionalNullable[str] = UNSET, - wandb_name: OptionalNullable[str] = UNSET, - suffix: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: - r"""Get Fine Tuning Jobs - - Get a list of fine-tuning jobs for your organization and user. - - :param page: The page number of the results to be returned. - :param page_size: The number of items to return per page. - :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. - :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. - :param created_before: - :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. - :param status: The current job state to filter on. When set, the other results are not displayed. - :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. - :param wandb_name: The Weight and Biases run name to filter on. 
When set, the other results are not displayed. - :param suffix: The model suffix to filter on. When set, the other results are not displayed. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( - page=page, - page_size=page_size, - model=model, - created_after=created_after, - created_before=created_before, - created_by_me=created_by_me, - status=status, - wandb_project=wandb_project, - wandb_name=wandb_name, - suffix=suffix, - ) - - req = self._build_request_async( - method="GET", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, 
models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.JobsOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def create( - self, - *, - model: str, - hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], - training_files: Optional[ - Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] - ] = None, - validation_files: OptionalNullable[List[str]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[ - Union[ - List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] - ] - ] = UNSET, - auto_start: Optional[bool] = None, - invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, - repositories: OptionalNullable[ - Union[ - List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] - ] - ] = UNSET, - classifier_targets: OptionalNullable[ - Union[ - List[models.ClassifierTargetIn], - List[models.ClassifierTargetInTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: 
Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: - r"""Create Fine Tuning Job - - Create a new fine-tuning job, it will be queued for processing. - - :param model: The name of the model to fine-tune. - :param hyperparameters: - :param training_files: - :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. - :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` - :param integrations: A list of integrations to enable for your fine-tuning job. - :param auto_start: This field will be required in a future release. - :param invalid_sample_skip_percentage: - :param job_type: - :param repositories: - :param classifier_targets: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobIn( - model=model, - training_files=utils.get_pydantic_model( - training_files, Optional[List[models.TrainingFile]] - ), - validation_files=validation_files, - suffix=suffix, - integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] - ), - auto_start=auto_start, - invalid_sample_skip_percentage=invalid_sample_skip_percentage, - job_type=job_type, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.Hyperparameters - ), - repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] - ), - classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] - ), - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=[], - 
security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def create_async( - self, - *, - model: str, - hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], - training_files: Optional[ - Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] - ] = None, - validation_files: OptionalNullable[List[str]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[ - Union[ - List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] - ] - ] = UNSET, - auto_start: Optional[bool] = None, - invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, - repositories: OptionalNullable[ - Union[ - List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] - ] - ] = UNSET, - classifier_targets: OptionalNullable[ - Union[ - List[models.ClassifierTargetIn], - List[models.ClassifierTargetInTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = 
UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: - r"""Create Fine Tuning Job - - Create a new fine-tuning job, it will be queued for processing. - - :param model: The name of the model to fine-tune. - :param hyperparameters: - :param training_files: - :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. - :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` - :param integrations: A list of integrations to enable for your fine-tuning job. - :param auto_start: This field will be required in a future release. - :param invalid_sample_skip_percentage: - :param job_type: - :param repositories: - :param classifier_targets: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobIn( - model=model, - training_files=utils.get_pydantic_model( - training_files, Optional[List[models.TrainingFile]] - ), - validation_files=validation_files, - suffix=suffix, - integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] - ), - auto_start=auto_start, - invalid_sample_skip_percentage=invalid_sample_skip_percentage, - job_type=job_type, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.Hyperparameters - ), - repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] - ), - classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - 
oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def get( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: - r"""Get Fine Tuning Job - - Get a fine-tuned job details by its UUID. - - :param job_id: The ID of the job to analyse. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="GET", - path="/v1/fine_tuning/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = 
http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def get_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: - r"""Get Fine Tuning Job - - Get a fine-tuned job details by its UUID. - - :param job_id: The ID of the job to analyse. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/fine_tuning/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", 
"502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def cancel( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: - r"""Cancel Fine Tuning Job - - Request the cancellation of a fine tuning job. - - :param job_id: The ID of the job to cancel. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def cancel_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: - r"""Cancel Fine Tuning Job - - Request the cancellation of a fine tuning job. - - :param job_id: The ID of the job to cancel. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def start( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: - r"""Start Fine Tuning Job - - Request the start of a validated fine tuning job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/start", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - 
content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def start_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: - r"""Start Fine Tuning Job - - Request the start of a validated fine tuning job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/start", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", 
"500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py deleted file mode 100644 index f0d4be01..00000000 --- a/src/mistralai/mistral_agents.py +++ /dev/null @@ -1,1170 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import Any, List, Mapping, Optional, Union - - -class MistralAgents(BaseSDK): - r"""(beta) Agents API""" - - def create( - self, - *, - model: str, - name: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models.AgentCreationRequestTools], - List[models.AgentCreationRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Create a agent that can be used within a conversation. - - Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. - - :param model: - :param name: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param description: - :param handoffs: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentCreationRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - ) - - req = self._build_request( - method="POST", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = 
utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def create_async( - self, - *, - model: str, - name: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models.AgentCreationRequestTools], - List[models.AgentCreationRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Create a agent that can be used within a conversation. - - Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. - - :param model: - :param name: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. 
- :param completion_args: White-listed arguments from the completion API - :param description: - :param handoffs: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentCreationRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="agents_api_v1_agents_create", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List agent entities. - - Retrieve a list of agent entities sorted by creation time. 
- - :param page: - :param page_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListRequest( - page=page, - page_size=page_size, - ) - - req = self._build_request( - method="GET", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.Agent]) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - 
http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List agent entities. - - Retrieve a list of agent entities sorted by creation time. - - :param page: - :param page_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListRequest( - page=page, - page_size=page_size, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.Agent]) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def get( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve an agent entity. - - Given an agent retrieve an agent entity with its attributes. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetRequest( - agent_id=agent_id, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def get_async( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve an agent entity. - - Given an agent retrieve an agent entity with its attributes. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetRequest( - agent_id=agent_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, 
"5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def update( - self, - *, - agent_id: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models.AgentUpdateRequestTools], - List[models.AgentUpdateRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - model: OptionalNullable[str] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent entity. - - Update an agent attributes and create a new version. - - :param agent_id: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param model: - :param name: - :param description: - :param handoffs: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateRequest( - agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - ), - ) - - req = self._build_request( - method="PATCH", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, - False, - False, - "json", - models.AgentUpdateRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return 
utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def update_async( - self, - *, - agent_id: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models.AgentUpdateRequestTools], - List[models.AgentUpdateRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = None, - model: OptionalNullable[str] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent entity. - - Update an agent attributes and create a new version. - - :param agent_id: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. 
- :param completion_args: White-listed arguments from the completion API - :param model: - :param name: - :param description: - :param handoffs: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateRequest( - agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - ), - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, - False, - False, - "json", - models.AgentUpdateRequest, - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = 
await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def update_version( - self, - *, - agent_id: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent version. - - Switch the version of an agent. 
- - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request( - method="PATCH", - path="/v1/agents/{agent_id}/version", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - 
response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def update_version_async( - self, - *, - agent_id: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent version. - - Switch the version of an agent. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/agents/{agent_id}/version", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, 
http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py deleted file mode 100644 index c51d64a7..00000000 --- a/src/mistralai/mistral_jobs.py +++ /dev/null @@ -1,841 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from datetime import datetime -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import Any, Dict, List, Mapping, Optional - - -class MistralJobs(BaseSDK): - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: - r"""Get Batch Jobs - - Get a list of batch jobs for your organization and user. 
- - :param page: - :param page_size: - :param model: - :param agent_id: - :param metadata: - :param created_after: - :param created_by_me: - :param status: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( - page=page, - page_size=page_size, - model=model, - agent_id=agent_id, - metadata=metadata, - created_after=created_after, - created_by_me=created_by_me, - status=status, - ) - - req = self._build_request( - method="GET", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - 
retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobsOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: - r"""Get Batch Jobs - - Get a list of batch jobs for your organization and user. - - :param page: - :param page_size: - :param model: - :param agent_id: - :param metadata: - :param created_after: - :param created_by_me: - :param status: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( - page=page, - page_size=page_size, - model=model, - agent_id=agent_id, - metadata=metadata, - created_after=created_after, - created_by_me=created_by_me, - status=status, - ) - - req = self._build_request_async( - method="GET", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobsOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - 
raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def create( - self, - *, - input_files: List[str], - endpoint: models.APIEndpoint, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, str]] = UNSET, - timeout_hours: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Create Batch Job - - Create a new batch job, it will be queued for processing. - - :param input_files: - :param endpoint: - :param model: - :param agent_id: - :param metadata: - :param timeout_hours: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.BatchJobIn( - input_files=input_files, - endpoint=endpoint, - model=model, - agent_id=agent_id, - metadata=metadata, - timeout_hours=timeout_hours, - ) - - req = self._build_request( - method="POST", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def create_async( - self, - *, - input_files: List[str], - endpoint: models.APIEndpoint, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, str]] = UNSET, - timeout_hours: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Create Batch Job - - Create a new batch job, it will be queued for processing. - - :param input_files: - :param endpoint: - :param model: - :param agent_id: - :param metadata: - :param timeout_hours: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.BatchJobIn( - input_files=input_files, - endpoint=endpoint, - model=model, - agent_id=agent_id, - metadata=metadata, - timeout_hours=timeout_hours, - ) - - req = self._build_request_async( - method="POST", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def get( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Get Batch Job - - Get a batch job details by its UUID. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="GET", - path="/v1/batch/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def get_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Get Batch Job - - Get a batch job details by its UUID. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/batch/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", 
- operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - def cancel( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Cancel Batch Job - - Request the cancellation of a batch job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/batch/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def cancel_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Cancel Batch Job - - Request the cancellation of a batch job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/batch/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - 
base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py deleted file mode 100644 index 2039c2b6..00000000 --- a/src/mistralai/models/__init__.py +++ /dev/null @@ -1,2287 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from typing import TYPE_CHECKING -from importlib import import_module - -if TYPE_CHECKING: - from .agent import ( - Agent, - AgentObject, - AgentTools, - AgentToolsTypedDict, - AgentTypedDict, - ) - from .agentconversation import ( - AgentConversation, - AgentConversationObject, - AgentConversationTypedDict, - ) - from .agentcreationrequest import ( - AgentCreationRequest, - AgentCreationRequestTools, - AgentCreationRequestToolsTypedDict, - AgentCreationRequestTypedDict, - ) - from .agenthandoffdoneevent import ( - AgentHandoffDoneEvent, - AgentHandoffDoneEventType, - AgentHandoffDoneEventTypedDict, - ) - from .agenthandoffentry import ( - AgentHandoffEntry, - AgentHandoffEntryObject, - AgentHandoffEntryType, - AgentHandoffEntryTypedDict, - ) - from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - AgentHandoffStartedEventType, - AgentHandoffStartedEventTypedDict, - ) - from .agents_api_v1_agents_getop import ( - AgentsAPIV1AgentsGetRequest, - AgentsAPIV1AgentsGetRequestTypedDict, - ) - from .agents_api_v1_agents_listop import ( - AgentsAPIV1AgentsListRequest, - AgentsAPIV1AgentsListRequestTypedDict, - ) - from .agents_api_v1_agents_update_versionop import ( - AgentsAPIV1AgentsUpdateVersionRequest, - AgentsAPIV1AgentsUpdateVersionRequestTypedDict, - ) - from .agents_api_v1_agents_updateop import ( - AgentsAPIV1AgentsUpdateRequest, - AgentsAPIV1AgentsUpdateRequestTypedDict, - ) - from .agents_api_v1_conversations_append_streamop import ( - AgentsAPIV1ConversationsAppendStreamRequest, - AgentsAPIV1ConversationsAppendStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_appendop import ( - AgentsAPIV1ConversationsAppendRequest, - AgentsAPIV1ConversationsAppendRequestTypedDict, - ) - from .agents_api_v1_conversations_getop import ( - AgentsAPIV1ConversationsGetRequest, - AgentsAPIV1ConversationsGetRequestTypedDict, - AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - 
AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, - ) - from .agents_api_v1_conversations_historyop import ( - AgentsAPIV1ConversationsHistoryRequest, - AgentsAPIV1ConversationsHistoryRequestTypedDict, - ) - from .agents_api_v1_conversations_listop import ( - AgentsAPIV1ConversationsListRequest, - AgentsAPIV1ConversationsListRequestTypedDict, - ResponseBody, - ResponseBodyTypedDict, - ) - from .agents_api_v1_conversations_messagesop import ( - AgentsAPIV1ConversationsMessagesRequest, - AgentsAPIV1ConversationsMessagesRequestTypedDict, - ) - from .agents_api_v1_conversations_restart_streamop import ( - AgentsAPIV1ConversationsRestartStreamRequest, - AgentsAPIV1ConversationsRestartStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_restartop import ( - AgentsAPIV1ConversationsRestartRequest, - AgentsAPIV1ConversationsRestartRequestTypedDict, - ) - from .agentscompletionrequest import ( - AgentsCompletionRequest, - AgentsCompletionRequestMessages, - AgentsCompletionRequestMessagesTypedDict, - AgentsCompletionRequestStop, - AgentsCompletionRequestStopTypedDict, - AgentsCompletionRequestToolChoice, - AgentsCompletionRequestToolChoiceTypedDict, - AgentsCompletionRequestTypedDict, - ) - from .agentscompletionstreamrequest import ( - AgentsCompletionStreamRequest, - AgentsCompletionStreamRequestMessages, - AgentsCompletionStreamRequestMessagesTypedDict, - AgentsCompletionStreamRequestStop, - AgentsCompletionStreamRequestStopTypedDict, - AgentsCompletionStreamRequestToolChoice, - AgentsCompletionStreamRequestToolChoiceTypedDict, - AgentsCompletionStreamRequestTypedDict, - ) - from .agentupdaterequest import ( - AgentUpdateRequest, - AgentUpdateRequestTools, - AgentUpdateRequestToolsTypedDict, - AgentUpdateRequestTypedDict, - ) - from .apiendpoint import APIEndpoint - from .archiveftmodelout import ( - ArchiveFTModelOut, - ArchiveFTModelOutObject, - ArchiveFTModelOutTypedDict, - ) - from .assistantmessage import ( - AssistantMessage, - 
AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, - ) - from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict - from .audiotranscriptionrequest import ( - AudioTranscriptionRequest, - AudioTranscriptionRequestTypedDict, - ) - from .audiotranscriptionrequeststream import ( - AudioTranscriptionRequestStream, - AudioTranscriptionRequestStreamTypedDict, - ) - from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict - from .batcherror import BatchError, BatchErrorTypedDict - from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict - from .batchjobstatus import BatchJobStatus - from .builtinconnectors import BuiltInConnectors - from .chatclassificationrequest import ( - ChatClassificationRequest, - ChatClassificationRequestTypedDict, - ) - from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceTypedDict, - FinishReason, - ) - from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, - ) - from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, - ) - from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestMessages, - ChatCompletionStreamRequestMessagesTypedDict, - ChatCompletionStreamRequestStop, - ChatCompletionStreamRequestStopTypedDict, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - ) - from .chatmoderationrequest import ( - ChatModerationRequest, - ChatModerationRequestInputs, - ChatModerationRequestInputsTypedDict, - 
ChatModerationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, - ) - from .checkpointout import CheckpointOut, CheckpointOutTypedDict - from .classificationrequest import ( - ClassificationRequest, - ClassificationRequestInputs, - ClassificationRequestInputsTypedDict, - ClassificationRequestTypedDict, - ) - from .classificationresponse import ( - ClassificationResponse, - ClassificationResponseTypedDict, - ) - from .classificationtargetresult import ( - ClassificationTargetResult, - ClassificationTargetResultTypedDict, - ) - from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegrations, - ClassifierDetailedJobOutIntegrationsTypedDict, - ClassifierDetailedJobOutJobType, - ClassifierDetailedJobOutObject, - ClassifierDetailedJobOutStatus, - ClassifierDetailedJobOutTypedDict, - ) - from .classifierftmodelout import ( - ClassifierFTModelOut, - ClassifierFTModelOutModelType, - ClassifierFTModelOutObject, - ClassifierFTModelOutTypedDict, - ) - from .classifierjobout import ( - ClassifierJobOut, - ClassifierJobOutIntegrations, - ClassifierJobOutIntegrationsTypedDict, - ClassifierJobOutJobType, - ClassifierJobOutObject, - ClassifierJobOutStatus, - ClassifierJobOutTypedDict, - ) - from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict - from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict - from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, - ) - from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, - ) - from .codeinterpretertool import ( - CodeInterpreterTool, - CodeInterpreterToolType, - CodeInterpreterToolTypedDict, - ) - from .completionargs import CompletionArgs, CompletionArgsTypedDict - from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict - from .completionchunk import CompletionChunk, 
CompletionChunkTypedDict - from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutIntegrations, - CompletionDetailedJobOutIntegrationsTypedDict, - CompletionDetailedJobOutJobType, - CompletionDetailedJobOutObject, - CompletionDetailedJobOutRepositories, - CompletionDetailedJobOutRepositoriesTypedDict, - CompletionDetailedJobOutStatus, - CompletionDetailedJobOutTypedDict, - ) - from .completionevent import CompletionEvent, CompletionEventTypedDict - from .completionftmodelout import ( - CompletionFTModelOut, - CompletionFTModelOutObject, - CompletionFTModelOutTypedDict, - ModelType, - ) - from .completionjobout import ( - CompletionJobOut, - CompletionJobOutObject, - CompletionJobOutTypedDict, - Integrations, - IntegrationsTypedDict, - JobType, - Repositories, - RepositoriesTypedDict, - Status, - ) - from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceFinishReason, - CompletionResponseStreamChoiceTypedDict, - ) - from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, - ) - from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, - ) - from .contentchunk import ContentChunk, ContentChunkTypedDict - from .conversationappendrequest import ( - ConversationAppendRequest, - ConversationAppendRequestHandoffExecution, - ConversationAppendRequestTypedDict, - ) - from .conversationappendstreamrequest import ( - ConversationAppendStreamRequest, - ConversationAppendStreamRequestHandoffExecution, - ConversationAppendStreamRequestTypedDict, - ) - from .conversationevents import ( - ConversationEvents, - ConversationEventsData, - ConversationEventsDataTypedDict, - ConversationEventsTypedDict, - ) - from .conversationhistory import ( - ConversationHistory, - ConversationHistoryObject, - ConversationHistoryTypedDict, - Entries, - EntriesTypedDict, - ) - from 
.conversationinputs import ConversationInputs, ConversationInputsTypedDict - from .conversationmessages import ( - ConversationMessages, - ConversationMessagesObject, - ConversationMessagesTypedDict, - ) - from .conversationrequest import ( - ConversationRequest, - ConversationRequestTypedDict, - HandoffExecution, - Tools, - ToolsTypedDict, - ) - from .conversationresponse import ( - ConversationResponse, - ConversationResponseObject, - ConversationResponseTypedDict, - Outputs, - OutputsTypedDict, - ) - from .conversationrestartrequest import ( - ConversationRestartRequest, - ConversationRestartRequestHandoffExecution, - ConversationRestartRequestTypedDict, - ) - from .conversationrestartstreamrequest import ( - ConversationRestartStreamRequest, - ConversationRestartStreamRequestHandoffExecution, - ConversationRestartStreamRequestTypedDict, - ) - from .conversationstreamrequest import ( - ConversationStreamRequest, - ConversationStreamRequestHandoffExecution, - ConversationStreamRequestTools, - ConversationStreamRequestToolsTypedDict, - ConversationStreamRequestTypedDict, - ) - from .conversationusageinfo import ( - ConversationUsageInfo, - ConversationUsageInfoTypedDict, - ) - from .delete_model_v1_models_model_id_deleteop import ( - DeleteModelV1ModelsModelIDDeleteRequest, - DeleteModelV1ModelsModelIDDeleteRequestTypedDict, - ) - from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict - from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict - from .deltamessage import ( - Content, - ContentTypedDict, - DeltaMessage, - DeltaMessageTypedDict, - ) - from .documentlibrarytool import ( - DocumentLibraryTool, - DocumentLibraryToolType, - DocumentLibraryToolTypedDict, - ) - from .documentout import DocumentOut, DocumentOutTypedDict - from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict - from .documenturlchunk import ( - DocumentURLChunk, - 
DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) - from .embeddingdtype import EmbeddingDtype - from .embeddingrequest import ( - EmbeddingRequest, - EmbeddingRequestInputs, - EmbeddingRequestInputsTypedDict, - EmbeddingRequestTypedDict, - ) - from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict - from .embeddingresponsedata import ( - EmbeddingResponseData, - EmbeddingResponseDataTypedDict, - ) - from .entitytype import EntityType - from .eventout import EventOut, EventOutTypedDict - from .file import File, FileTypedDict - from .filechunk import FileChunk, FileChunkTypedDict - from .filepurpose import FilePurpose - from .files_api_routes_delete_fileop import ( - FilesAPIRoutesDeleteFileRequest, - FilesAPIRoutesDeleteFileRequestTypedDict, - ) - from .files_api_routes_download_fileop import ( - FilesAPIRoutesDownloadFileRequest, - FilesAPIRoutesDownloadFileRequestTypedDict, - ) - from .files_api_routes_get_signed_urlop import ( - FilesAPIRoutesGetSignedURLRequest, - FilesAPIRoutesGetSignedURLRequestTypedDict, - ) - from .files_api_routes_list_filesop import ( - FilesAPIRoutesListFilesRequest, - FilesAPIRoutesListFilesRequestTypedDict, - ) - from .files_api_routes_retrieve_fileop import ( - FilesAPIRoutesRetrieveFileRequest, - FilesAPIRoutesRetrieveFileRequestTypedDict, - ) - from .files_api_routes_upload_fileop import ( - FilesAPIRoutesUploadFileMultiPartBodyParams, - FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, - ) - from .fileschema import FileSchema, FileSchemaTypedDict - from .filesignedurl import FileSignedURL, FileSignedURLTypedDict - from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, - ) - from .fimcompletionresponse import ( - FIMCompletionResponse, - FIMCompletionResponseTypedDict, - ) - from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - 
FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, - ) - from .finetuneablemodeltype import FineTuneableModelType - from .ftclassifierlossfunction import FTClassifierLossFunction - from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, - ) - from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict - from .function import Function, FunctionTypedDict - from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, - ) - from .functioncallentry import ( - FunctionCallEntry, - FunctionCallEntryObject, - FunctionCallEntryType, - FunctionCallEntryTypedDict, - ) - from .functioncallentryarguments import ( - FunctionCallEntryArguments, - FunctionCallEntryArgumentsTypedDict, - ) - from .functioncallevent import ( - FunctionCallEvent, - FunctionCallEventType, - FunctionCallEventTypedDict, - ) - from .functionname import FunctionName, FunctionNameTypedDict - from .functionresultentry import ( - FunctionResultEntry, - FunctionResultEntryObject, - FunctionResultEntryType, - FunctionResultEntryTypedDict, - ) - from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict - from .githubrepositoryin import ( - GithubRepositoryIn, - GithubRepositoryInType, - GithubRepositoryInTypedDict, - ) - from .githubrepositoryout import ( - GithubRepositoryOut, - GithubRepositoryOutType, - GithubRepositoryOutTypedDict, - ) - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData - from .imagegenerationtool import ( - ImageGenerationTool, - ImageGenerationToolType, - ImageGenerationToolTypedDict, - ) - from .imageurl import ImageURL, ImageURLTypedDict - from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, - ) - from .inputentries import InputEntries, InputEntriesTypedDict - from .inputs import ( - Inputs, - InputsTypedDict, - 
InstructRequestInputs, - InstructRequestInputsMessages, - InstructRequestInputsMessagesTypedDict, - InstructRequestInputsTypedDict, - ) - from .instructrequest import ( - InstructRequest, - InstructRequestMessages, - InstructRequestMessagesTypedDict, - InstructRequestTypedDict, - ) - from .jobin import ( - Hyperparameters, - HyperparametersTypedDict, - JobIn, - JobInIntegrations, - JobInIntegrationsTypedDict, - JobInRepositories, - JobInRepositoriesTypedDict, - JobInTypedDict, - ) - from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict - from .jobs_api_routes_batch_cancel_batch_jobop import ( - JobsAPIRoutesBatchCancelBatchJobRequest, - JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobop import ( - JobsAPIRoutesBatchGetBatchJobRequest, - JobsAPIRoutesBatchGetBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobsop import ( - JobsAPIRoutesBatchGetBatchJobsRequest, - JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCancelFineTuningJobRequest, - JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningCancelFineTuningJobResponse, - JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCreateFineTuningJobResponse, - JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, - Response1, - Response1TypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningGetFineTuningJobRequest, - JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobResponse, - 
JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( - JobsAPIRoutesFineTuningGetFineTuningJobsRequest, - JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, - QueryParamStatus, - ) - from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningStartFineTuningJobRequest, - JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningStartFineTuningJobResponse, - JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, - JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, - ) - from .jobsout import ( - JobsOut, - JobsOutData, - JobsOutDataTypedDict, - JobsOutObject, - JobsOutTypedDict, - ) - from .jsonschema import JSONSchema, JSONSchemaTypedDict - from .legacyjobmetadataout import ( - LegacyJobMetadataOut, - LegacyJobMetadataOutObject, - LegacyJobMetadataOutTypedDict, - ) - from .libraries_delete_v1op import ( - LibrariesDeleteV1Request, - LibrariesDeleteV1RequestTypedDict, - ) - from .libraries_documents_delete_v1op import ( - LibrariesDocumentsDeleteV1Request, - LibrariesDocumentsDeleteV1RequestTypedDict, - ) - from .libraries_documents_get_extracted_text_signed_url_v1op import ( - LibrariesDocumentsGetExtractedTextSignedURLV1Request, - LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_signed_url_v1op import ( - LibrariesDocumentsGetSignedURLV1Request, - LibrariesDocumentsGetSignedURLV1RequestTypedDict, - ) - from 
.libraries_documents_get_status_v1op import ( - LibrariesDocumentsGetStatusV1Request, - LibrariesDocumentsGetStatusV1RequestTypedDict, - ) - from .libraries_documents_get_text_content_v1op import ( - LibrariesDocumentsGetTextContentV1Request, - LibrariesDocumentsGetTextContentV1RequestTypedDict, - ) - from .libraries_documents_get_v1op import ( - LibrariesDocumentsGetV1Request, - LibrariesDocumentsGetV1RequestTypedDict, - ) - from .libraries_documents_list_v1op import ( - LibrariesDocumentsListV1Request, - LibrariesDocumentsListV1RequestTypedDict, - ) - from .libraries_documents_reprocess_v1op import ( - LibrariesDocumentsReprocessV1Request, - LibrariesDocumentsReprocessV1RequestTypedDict, - ) - from .libraries_documents_update_v1op import ( - LibrariesDocumentsUpdateV1Request, - LibrariesDocumentsUpdateV1RequestTypedDict, - ) - from .libraries_documents_upload_v1op import ( - LibrariesDocumentsUploadV1DocumentUpload, - LibrariesDocumentsUploadV1DocumentUploadTypedDict, - LibrariesDocumentsUploadV1Request, - LibrariesDocumentsUploadV1RequestTypedDict, - ) - from .libraries_get_v1op import ( - LibrariesGetV1Request, - LibrariesGetV1RequestTypedDict, - ) - from .libraries_share_create_v1op import ( - LibrariesShareCreateV1Request, - LibrariesShareCreateV1RequestTypedDict, - ) - from .libraries_share_delete_v1op import ( - LibrariesShareDeleteV1Request, - LibrariesShareDeleteV1RequestTypedDict, - ) - from .libraries_share_list_v1op import ( - LibrariesShareListV1Request, - LibrariesShareListV1RequestTypedDict, - ) - from .libraries_update_v1op import ( - LibrariesUpdateV1Request, - LibrariesUpdateV1RequestTypedDict, - ) - from .libraryin import LibraryIn, LibraryInTypedDict - from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict - from .libraryout import LibraryOut, LibraryOutTypedDict - from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict - from .listfilesout import ListFilesOut, ListFilesOutTypedDict - from .listlibraryout import 
ListLibraryOut, ListLibraryOutTypedDict - from .listsharingout import ListSharingOut, ListSharingOutTypedDict - from .messageentries import MessageEntries, MessageEntriesTypedDict - from .messageinputcontentchunks import ( - MessageInputContentChunks, - MessageInputContentChunksTypedDict, - ) - from .messageinputentry import ( - MessageInputEntry, - MessageInputEntryContent, - MessageInputEntryContentTypedDict, - MessageInputEntryRole, - MessageInputEntryType, - MessageInputEntryTypedDict, - Object, - ) - from .messageoutputcontentchunks import ( - MessageOutputContentChunks, - MessageOutputContentChunksTypedDict, - ) - from .messageoutputentry import ( - MessageOutputEntry, - MessageOutputEntryContent, - MessageOutputEntryContentTypedDict, - MessageOutputEntryObject, - MessageOutputEntryRole, - MessageOutputEntryType, - MessageOutputEntryTypedDict, - ) - from .messageoutputevent import ( - MessageOutputEvent, - MessageOutputEventContent, - MessageOutputEventContentTypedDict, - MessageOutputEventRole, - MessageOutputEventType, - MessageOutputEventTypedDict, - ) - from .metricout import MetricOut, MetricOutTypedDict - from .mistralpromptmode import MistralPromptMode - from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict - from .modelconversation import ( - ModelConversation, - ModelConversationObject, - ModelConversationTools, - ModelConversationToolsTypedDict, - ModelConversationTypedDict, - ) - from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict - from .moderationobject import ModerationObject, ModerationObjectTypedDict - from .moderationresponse import ModerationResponse, ModerationResponseTypedDict - from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict - from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict - from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict - from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict - from .ocrresponse 
import OCRResponse, OCRResponseTypedDict - from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict - from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict - from .paginationinfo import PaginationInfo, PaginationInfoTypedDict - from .prediction import Prediction, PredictionTypedDict - from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, - ) - from .responsedoneevent import ( - ResponseDoneEvent, - ResponseDoneEventType, - ResponseDoneEventTypedDict, - ) - from .responseerrorevent import ( - ResponseErrorEvent, - ResponseErrorEventType, - ResponseErrorEventTypedDict, - ) - from .responseformat import ResponseFormat, ResponseFormatTypedDict - from .responseformats import ResponseFormats - from .responsestartedevent import ( - ResponseStartedEvent, - ResponseStartedEventType, - ResponseStartedEventTypedDict, - ) - from .retrieve_model_v1_models_model_id_getop import ( - RetrieveModelV1ModelsModelIDGetRequest, - RetrieveModelV1ModelsModelIDGetRequestTypedDict, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, - ) - from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict - from .sampletype import SampleType - from .sdkerror import SDKError - from .security import Security, SecurityTypedDict - from .shareenum import ShareEnum - from .sharingdelete import SharingDelete, SharingDeleteTypedDict - from .sharingin import SharingIn, SharingInTypedDict - from .sharingout import SharingOut, SharingOutTypedDict - from .source import Source - from .ssetypes import SSETypes - from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, - ) - from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict - from .thinkchunk 
import ( - ThinkChunk, - ThinkChunkType, - ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, - ) - from .timestampgranularity import TimestampGranularity - from .tool import Tool, ToolTypedDict - from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict - from .toolchoice import ToolChoice, ToolChoiceTypedDict - from .toolchoiceenum import ToolChoiceEnum - from .toolexecutiondeltaevent import ( - ToolExecutionDeltaEvent, - ToolExecutionDeltaEventType, - ToolExecutionDeltaEventTypedDict, - ) - from .toolexecutiondoneevent import ( - ToolExecutionDoneEvent, - ToolExecutionDoneEventType, - ToolExecutionDoneEventTypedDict, - ) - from .toolexecutionentry import ( - ToolExecutionEntry, - ToolExecutionEntryObject, - ToolExecutionEntryType, - ToolExecutionEntryTypedDict, - ) - from .toolexecutionstartedevent import ( - ToolExecutionStartedEvent, - ToolExecutionStartedEventType, - ToolExecutionStartedEventTypedDict, - ) - from .toolfilechunk import ToolFileChunk, ToolFileChunkType, ToolFileChunkTypedDict - from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, - ) - from .toolreferencechunk import ( - ToolReferenceChunk, - ToolReferenceChunkType, - ToolReferenceChunkTypedDict, - ) - from .tooltypes import ToolTypes - from .trainingfile import TrainingFile, TrainingFileTypedDict - from .transcriptionresponse import ( - TranscriptionResponse, - TranscriptionResponseTypedDict, - ) - from .transcriptionsegmentchunk import ( - TranscriptionSegmentChunk, - TranscriptionSegmentChunkTypedDict, - Type, - ) - from .transcriptionstreamdone import ( - TranscriptionStreamDone, - TranscriptionStreamDoneType, - TranscriptionStreamDoneTypedDict, - ) - from .transcriptionstreamevents import ( - TranscriptionStreamEvents, - TranscriptionStreamEventsData, - TranscriptionStreamEventsDataTypedDict, - TranscriptionStreamEventsTypedDict, - ) - from .transcriptionstreameventtypes import 
TranscriptionStreamEventTypes - from .transcriptionstreamlanguage import ( - TranscriptionStreamLanguage, - TranscriptionStreamLanguageType, - TranscriptionStreamLanguageTypedDict, - ) - from .transcriptionstreamsegmentdelta import ( - TranscriptionStreamSegmentDelta, - TranscriptionStreamSegmentDeltaType, - TranscriptionStreamSegmentDeltaTypedDict, - ) - from .transcriptionstreamtextdelta import ( - TranscriptionStreamTextDelta, - TranscriptionStreamTextDeltaType, - TranscriptionStreamTextDeltaTypedDict, - ) - from .unarchiveftmodelout import ( - UnarchiveFTModelOut, - UnarchiveFTModelOutObject, - UnarchiveFTModelOutTypedDict, - ) - from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict - from .uploadfileout import UploadFileOut, UploadFileOutTypedDict - from .usageinfo import UsageInfo, UsageInfoTypedDict - from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, - ) - from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, - ) - from .wandbintegration import ( - WandbIntegration, - WandbIntegrationType, - WandbIntegrationTypedDict, - ) - from .wandbintegrationout import ( - WandbIntegrationOut, - WandbIntegrationOutType, - WandbIntegrationOutTypedDict, - ) - from .websearchpremiumtool import ( - WebSearchPremiumTool, - WebSearchPremiumToolType, - WebSearchPremiumToolTypedDict, - ) - from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict - -__all__ = [ - "APIEndpoint", - "Agent", - "AgentConversation", - "AgentConversationObject", - "AgentConversationTypedDict", - "AgentCreationRequest", - "AgentCreationRequestTools", - "AgentCreationRequestToolsTypedDict", - "AgentCreationRequestTypedDict", - "AgentHandoffDoneEvent", - "AgentHandoffDoneEventType", - "AgentHandoffDoneEventTypedDict", - "AgentHandoffEntry", - "AgentHandoffEntryObject", - "AgentHandoffEntryType", - "AgentHandoffEntryTypedDict", - 
"AgentHandoffStartedEvent", - "AgentHandoffStartedEventType", - "AgentHandoffStartedEventTypedDict", - "AgentObject", - "AgentTools", - "AgentToolsTypedDict", - "AgentTypedDict", - "AgentUpdateRequest", - "AgentUpdateRequestTools", - "AgentUpdateRequestToolsTypedDict", - "AgentUpdateRequestTypedDict", - "AgentsAPIV1AgentsGetRequest", - "AgentsAPIV1AgentsGetRequestTypedDict", - "AgentsAPIV1AgentsListRequest", - "AgentsAPIV1AgentsListRequestTypedDict", - "AgentsAPIV1AgentsUpdateRequest", - "AgentsAPIV1AgentsUpdateRequestTypedDict", - "AgentsAPIV1AgentsUpdateVersionRequest", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", - "AgentsAPIV1ConversationsAppendRequest", - "AgentsAPIV1ConversationsAppendRequestTypedDict", - "AgentsAPIV1ConversationsAppendStreamRequest", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", - "AgentsAPIV1ConversationsGetRequest", - "AgentsAPIV1ConversationsGetRequestTypedDict", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", - "AgentsAPIV1ConversationsHistoryRequest", - "AgentsAPIV1ConversationsHistoryRequestTypedDict", - "AgentsAPIV1ConversationsListRequest", - "AgentsAPIV1ConversationsListRequestTypedDict", - "AgentsAPIV1ConversationsMessagesRequest", - "AgentsAPIV1ConversationsMessagesRequestTypedDict", - "AgentsAPIV1ConversationsRestartRequest", - "AgentsAPIV1ConversationsRestartRequestTypedDict", - "AgentsAPIV1ConversationsRestartStreamRequest", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", - "AgentsCompletionRequest", - "AgentsCompletionRequestMessages", - "AgentsCompletionRequestMessagesTypedDict", - "AgentsCompletionRequestStop", - "AgentsCompletionRequestStopTypedDict", - "AgentsCompletionRequestToolChoice", - "AgentsCompletionRequestToolChoiceTypedDict", - "AgentsCompletionRequestTypedDict", - "AgentsCompletionStreamRequest", - "AgentsCompletionStreamRequestMessages", - "AgentsCompletionStreamRequestMessagesTypedDict", - 
"AgentsCompletionStreamRequestStop", - "AgentsCompletionStreamRequestStopTypedDict", - "AgentsCompletionStreamRequestToolChoice", - "AgentsCompletionStreamRequestToolChoiceTypedDict", - "AgentsCompletionStreamRequestTypedDict", - "ArchiveFTModelOut", - "ArchiveFTModelOutObject", - "ArchiveFTModelOutTypedDict", - "Arguments", - "ArgumentsTypedDict", - "AssistantMessage", - "AssistantMessageContent", - "AssistantMessageContentTypedDict", - "AssistantMessageRole", - "AssistantMessageTypedDict", - "AudioChunk", - "AudioChunkType", - "AudioChunkTypedDict", - "AudioTranscriptionRequest", - "AudioTranscriptionRequestStream", - "AudioTranscriptionRequestStreamTypedDict", - "AudioTranscriptionRequestTypedDict", - "BaseModelCard", - "BaseModelCardType", - "BaseModelCardTypedDict", - "BatchError", - "BatchErrorTypedDict", - "BatchJobIn", - "BatchJobInTypedDict", - "BatchJobOut", - "BatchJobOutObject", - "BatchJobOutTypedDict", - "BatchJobStatus", - "BatchJobsOut", - "BatchJobsOutObject", - "BatchJobsOutTypedDict", - "BuiltInConnectors", - "ChatClassificationRequest", - "ChatClassificationRequestTypedDict", - "ChatCompletionChoice", - "ChatCompletionChoiceTypedDict", - "ChatCompletionRequest", - "ChatCompletionRequestToolChoice", - "ChatCompletionRequestToolChoiceTypedDict", - "ChatCompletionRequestTypedDict", - "ChatCompletionResponse", - "ChatCompletionResponseTypedDict", - "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestMessages", - "ChatCompletionStreamRequestMessagesTypedDict", - "ChatCompletionStreamRequestStop", - "ChatCompletionStreamRequestStopTypedDict", - "ChatCompletionStreamRequestToolChoice", - "ChatCompletionStreamRequestToolChoiceTypedDict", - "ChatCompletionStreamRequestTypedDict", - "ChatModerationRequest", - "ChatModerationRequestInputs", - "ChatModerationRequestInputsTypedDict", - "ChatModerationRequestTypedDict", - "CheckpointOut", - "CheckpointOutTypedDict", - "ClassificationRequest", - "ClassificationRequestInputs", - 
"ClassificationRequestInputsTypedDict", - "ClassificationRequestTypedDict", - "ClassificationResponse", - "ClassificationResponseTypedDict", - "ClassificationTargetResult", - "ClassificationTargetResultTypedDict", - "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegrations", - "ClassifierDetailedJobOutIntegrationsTypedDict", - "ClassifierDetailedJobOutJobType", - "ClassifierDetailedJobOutObject", - "ClassifierDetailedJobOutStatus", - "ClassifierDetailedJobOutTypedDict", - "ClassifierFTModelOut", - "ClassifierFTModelOutModelType", - "ClassifierFTModelOutObject", - "ClassifierFTModelOutTypedDict", - "ClassifierJobOut", - "ClassifierJobOutIntegrations", - "ClassifierJobOutIntegrationsTypedDict", - "ClassifierJobOutJobType", - "ClassifierJobOutObject", - "ClassifierJobOutStatus", - "ClassifierJobOutTypedDict", - "ClassifierTargetIn", - "ClassifierTargetInTypedDict", - "ClassifierTargetOut", - "ClassifierTargetOutTypedDict", - "ClassifierTrainingParameters", - "ClassifierTrainingParametersIn", - "ClassifierTrainingParametersInTypedDict", - "ClassifierTrainingParametersTypedDict", - "CodeInterpreterTool", - "CodeInterpreterToolType", - "CodeInterpreterToolTypedDict", - "CompletionArgs", - "CompletionArgsStop", - "CompletionArgsStopTypedDict", - "CompletionArgsTypedDict", - "CompletionChunk", - "CompletionChunkTypedDict", - "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegrations", - "CompletionDetailedJobOutIntegrationsTypedDict", - "CompletionDetailedJobOutJobType", - "CompletionDetailedJobOutObject", - "CompletionDetailedJobOutRepositories", - "CompletionDetailedJobOutRepositoriesTypedDict", - "CompletionDetailedJobOutStatus", - "CompletionDetailedJobOutTypedDict", - "CompletionEvent", - "CompletionEventTypedDict", - "CompletionFTModelOut", - "CompletionFTModelOutObject", - "CompletionFTModelOutTypedDict", - "CompletionJobOut", - "CompletionJobOutObject", - "CompletionJobOutTypedDict", - "CompletionResponseStreamChoice", - 
"CompletionResponseStreamChoiceFinishReason", - "CompletionResponseStreamChoiceTypedDict", - "CompletionTrainingParameters", - "CompletionTrainingParametersIn", - "CompletionTrainingParametersInTypedDict", - "CompletionTrainingParametersTypedDict", - "Content", - "ContentChunk", - "ContentChunkTypedDict", - "ContentTypedDict", - "ConversationAppendRequest", - "ConversationAppendRequestHandoffExecution", - "ConversationAppendRequestTypedDict", - "ConversationAppendStreamRequest", - "ConversationAppendStreamRequestHandoffExecution", - "ConversationAppendStreamRequestTypedDict", - "ConversationEvents", - "ConversationEventsData", - "ConversationEventsDataTypedDict", - "ConversationEventsTypedDict", - "ConversationHistory", - "ConversationHistoryObject", - "ConversationHistoryTypedDict", - "ConversationInputs", - "ConversationInputsTypedDict", - "ConversationMessages", - "ConversationMessagesObject", - "ConversationMessagesTypedDict", - "ConversationRequest", - "ConversationRequestTypedDict", - "ConversationResponse", - "ConversationResponseObject", - "ConversationResponseTypedDict", - "ConversationRestartRequest", - "ConversationRestartRequestHandoffExecution", - "ConversationRestartRequestTypedDict", - "ConversationRestartStreamRequest", - "ConversationRestartStreamRequestHandoffExecution", - "ConversationRestartStreamRequestTypedDict", - "ConversationStreamRequest", - "ConversationStreamRequestHandoffExecution", - "ConversationStreamRequestTools", - "ConversationStreamRequestToolsTypedDict", - "ConversationStreamRequestTypedDict", - "ConversationUsageInfo", - "ConversationUsageInfoTypedDict", - "Data", - "DataTypedDict", - "DeleteFileOut", - "DeleteFileOutTypedDict", - "DeleteModelOut", - "DeleteModelOutTypedDict", - "DeleteModelV1ModelsModelIDDeleteRequest", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", - "DeltaMessage", - "DeltaMessageTypedDict", - "Document", - "DocumentLibraryTool", - "DocumentLibraryToolType", - "DocumentLibraryToolTypedDict", - 
"DocumentOut", - "DocumentOutTypedDict", - "DocumentTextContent", - "DocumentTextContentTypedDict", - "DocumentTypedDict", - "DocumentURLChunk", - "DocumentURLChunkType", - "DocumentURLChunkTypedDict", - "DocumentUpdateIn", - "DocumentUpdateInTypedDict", - "EmbeddingDtype", - "EmbeddingRequest", - "EmbeddingRequestInputs", - "EmbeddingRequestInputsTypedDict", - "EmbeddingRequestTypedDict", - "EmbeddingResponse", - "EmbeddingResponseData", - "EmbeddingResponseDataTypedDict", - "EmbeddingResponseTypedDict", - "EntityType", - "Entries", - "EntriesTypedDict", - "EventOut", - "EventOutTypedDict", - "FIMCompletionRequest", - "FIMCompletionRequestStop", - "FIMCompletionRequestStopTypedDict", - "FIMCompletionRequestTypedDict", - "FIMCompletionResponse", - "FIMCompletionResponseTypedDict", - "FIMCompletionStreamRequest", - "FIMCompletionStreamRequestStop", - "FIMCompletionStreamRequestStopTypedDict", - "FIMCompletionStreamRequestTypedDict", - "FTClassifierLossFunction", - "FTModelCapabilitiesOut", - "FTModelCapabilitiesOutTypedDict", - "FTModelCard", - "FTModelCardType", - "FTModelCardTypedDict", - "File", - "FileChunk", - "FileChunkTypedDict", - "FilePurpose", - "FileSchema", - "FileSchemaTypedDict", - "FileSignedURL", - "FileSignedURLTypedDict", - "FileTypedDict", - "FilesAPIRoutesDeleteFileRequest", - "FilesAPIRoutesDeleteFileRequestTypedDict", - "FilesAPIRoutesDownloadFileRequest", - "FilesAPIRoutesDownloadFileRequestTypedDict", - "FilesAPIRoutesGetSignedURLRequest", - "FilesAPIRoutesGetSignedURLRequestTypedDict", - "FilesAPIRoutesListFilesRequest", - "FilesAPIRoutesListFilesRequestTypedDict", - "FilesAPIRoutesRetrieveFileRequest", - "FilesAPIRoutesRetrieveFileRequestTypedDict", - "FilesAPIRoutesUploadFileMultiPartBodyParams", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", - "FineTuneableModelType", - "FinishReason", - "Function", - "FunctionCall", - "FunctionCallEntry", - "FunctionCallEntryArguments", - "FunctionCallEntryArgumentsTypedDict", - 
"FunctionCallEntryObject", - "FunctionCallEntryType", - "FunctionCallEntryTypedDict", - "FunctionCallEvent", - "FunctionCallEventType", - "FunctionCallEventTypedDict", - "FunctionCallTypedDict", - "FunctionName", - "FunctionNameTypedDict", - "FunctionResultEntry", - "FunctionResultEntryObject", - "FunctionResultEntryType", - "FunctionResultEntryTypedDict", - "FunctionTool", - "FunctionToolType", - "FunctionToolTypedDict", - "FunctionTypedDict", - "GithubRepositoryIn", - "GithubRepositoryInType", - "GithubRepositoryInTypedDict", - "GithubRepositoryOut", - "GithubRepositoryOutType", - "GithubRepositoryOutTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", - "HandoffExecution", - "Hyperparameters", - "HyperparametersTypedDict", - "ImageGenerationTool", - "ImageGenerationToolType", - "ImageGenerationToolTypedDict", - "ImageURL", - "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", - "ImageURLChunkTypedDict", - "ImageURLTypedDict", - "InputEntries", - "InputEntriesTypedDict", - "Inputs", - "InputsTypedDict", - "InstructRequest", - "InstructRequestInputs", - "InstructRequestInputsMessages", - "InstructRequestInputsMessagesTypedDict", - "InstructRequestInputsTypedDict", - "InstructRequestMessages", - "InstructRequestMessagesTypedDict", - "InstructRequestTypedDict", - "Integrations", - "IntegrationsTypedDict", - "JSONSchema", - "JSONSchemaTypedDict", - "JobIn", - "JobInIntegrations", - "JobInIntegrationsTypedDict", - "JobInRepositories", - "JobInRepositoriesTypedDict", - "JobInTypedDict", - "JobMetadataOut", - "JobMetadataOutTypedDict", - "JobType", - "JobsAPIRoutesBatchCancelBatchJobRequest", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobRequest", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobsRequest", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", - 
"JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", - "JobsOut", - "JobsOutData", - "JobsOutDataTypedDict", - "JobsOutObject", - "JobsOutTypedDict", - "LegacyJobMetadataOut", - "LegacyJobMetadataOutObject", - "LegacyJobMetadataOutTypedDict", - "LibrariesDeleteV1Request", - "LibrariesDeleteV1RequestTypedDict", - "LibrariesDocumentsDeleteV1Request", - "LibrariesDocumentsDeleteV1RequestTypedDict", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetSignedURLV1Request", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetStatusV1Request", - 
"LibrariesDocumentsGetStatusV1RequestTypedDict", - "LibrariesDocumentsGetTextContentV1Request", - "LibrariesDocumentsGetTextContentV1RequestTypedDict", - "LibrariesDocumentsGetV1Request", - "LibrariesDocumentsGetV1RequestTypedDict", - "LibrariesDocumentsListV1Request", - "LibrariesDocumentsListV1RequestTypedDict", - "LibrariesDocumentsReprocessV1Request", - "LibrariesDocumentsReprocessV1RequestTypedDict", - "LibrariesDocumentsUpdateV1Request", - "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1DocumentUpload", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict", - "LibrariesDocumentsUploadV1Request", - "LibrariesDocumentsUploadV1RequestTypedDict", - "LibrariesGetV1Request", - "LibrariesGetV1RequestTypedDict", - "LibrariesShareCreateV1Request", - "LibrariesShareCreateV1RequestTypedDict", - "LibrariesShareDeleteV1Request", - "LibrariesShareDeleteV1RequestTypedDict", - "LibrariesShareListV1Request", - "LibrariesShareListV1RequestTypedDict", - "LibrariesUpdateV1Request", - "LibrariesUpdateV1RequestTypedDict", - "LibraryIn", - "LibraryInTypedDict", - "LibraryInUpdate", - "LibraryInUpdateTypedDict", - "LibraryOut", - "LibraryOutTypedDict", - "ListDocumentOut", - "ListDocumentOutTypedDict", - "ListFilesOut", - "ListFilesOutTypedDict", - "ListLibraryOut", - "ListLibraryOutTypedDict", - "ListSharingOut", - "ListSharingOutTypedDict", - "Loc", - "LocTypedDict", - "MessageEntries", - "MessageEntriesTypedDict", - "MessageInputContentChunks", - "MessageInputContentChunksTypedDict", - "MessageInputEntry", - "MessageInputEntryContent", - "MessageInputEntryContentTypedDict", - "MessageInputEntryRole", - "MessageInputEntryType", - "MessageInputEntryTypedDict", - "MessageOutputContentChunks", - "MessageOutputContentChunksTypedDict", - "MessageOutputEntry", - "MessageOutputEntryContent", - "MessageOutputEntryContentTypedDict", - "MessageOutputEntryObject", - "MessageOutputEntryRole", - "MessageOutputEntryType", - "MessageOutputEntryTypedDict", - 
"MessageOutputEvent", - "MessageOutputEventContent", - "MessageOutputEventContentTypedDict", - "MessageOutputEventRole", - "MessageOutputEventType", - "MessageOutputEventTypedDict", - "Messages", - "MessagesTypedDict", - "Metadata", - "MetadataTypedDict", - "MetricOut", - "MetricOutTypedDict", - "MistralPromptMode", - "ModelCapabilities", - "ModelCapabilitiesTypedDict", - "ModelConversation", - "ModelConversationObject", - "ModelConversationTools", - "ModelConversationToolsTypedDict", - "ModelConversationTypedDict", - "ModelList", - "ModelListTypedDict", - "ModelType", - "ModerationObject", - "ModerationObjectTypedDict", - "ModerationResponse", - "ModerationResponseTypedDict", - "OCRImageObject", - "OCRImageObjectTypedDict", - "OCRPageDimensions", - "OCRPageDimensionsTypedDict", - "OCRPageObject", - "OCRPageObjectTypedDict", - "OCRRequest", - "OCRRequestTypedDict", - "OCRResponse", - "OCRResponseTypedDict", - "OCRUsageInfo", - "OCRUsageInfoTypedDict", - "Object", - "One", - "OneTypedDict", - "OutputContentChunks", - "OutputContentChunksTypedDict", - "Outputs", - "OutputsTypedDict", - "PaginationInfo", - "PaginationInfoTypedDict", - "Prediction", - "PredictionTypedDict", - "ProcessingStatusOut", - "ProcessingStatusOutTypedDict", - "QueryParamStatus", - "ReferenceChunk", - "ReferenceChunkType", - "ReferenceChunkTypedDict", - "Repositories", - "RepositoriesTypedDict", - "Response1", - "Response1TypedDict", - "ResponseBody", - "ResponseBodyTypedDict", - "ResponseDoneEvent", - "ResponseDoneEventType", - "ResponseDoneEventTypedDict", - "ResponseErrorEvent", - "ResponseErrorEventType", - "ResponseErrorEventTypedDict", - "ResponseFormat", - "ResponseFormatTypedDict", - "ResponseFormats", - "ResponseStartedEvent", - "ResponseStartedEventType", - "ResponseStartedEventTypedDict", - "RetrieveFileOut", - "RetrieveFileOutTypedDict", - "RetrieveModelV1ModelsModelIDGetRequest", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict", - 
"RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - "Role", - "SDKError", - "SSETypes", - "SampleType", - "Security", - "SecurityTypedDict", - "ShareEnum", - "SharingDelete", - "SharingDeleteTypedDict", - "SharingIn", - "SharingInTypedDict", - "SharingOut", - "SharingOutTypedDict", - "Source", - "Status", - "Stop", - "StopTypedDict", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentTypedDict", - "SystemMessageTypedDict", - "TextChunk", - "TextChunkType", - "TextChunkTypedDict", - "ThinkChunk", - "ThinkChunkType", - "ThinkChunkTypedDict", - "Thinking", - "ThinkingTypedDict", - "TimestampGranularity", - "Tool", - "ToolCall", - "ToolCallTypedDict", - "ToolChoice", - "ToolChoiceEnum", - "ToolChoiceTypedDict", - "ToolExecutionDeltaEvent", - "ToolExecutionDeltaEventType", - "ToolExecutionDeltaEventTypedDict", - "ToolExecutionDoneEvent", - "ToolExecutionDoneEventType", - "ToolExecutionDoneEventTypedDict", - "ToolExecutionEntry", - "ToolExecutionEntryObject", - "ToolExecutionEntryType", - "ToolExecutionEntryTypedDict", - "ToolExecutionStartedEvent", - "ToolExecutionStartedEventType", - "ToolExecutionStartedEventTypedDict", - "ToolFileChunk", - "ToolFileChunkType", - "ToolFileChunkTypedDict", - "ToolMessage", - "ToolMessageContent", - "ToolMessageContentTypedDict", - "ToolMessageRole", - "ToolMessageTypedDict", - "ToolReferenceChunk", - "ToolReferenceChunkType", - "ToolReferenceChunkTypedDict", - "ToolTypedDict", - "ToolTypes", - "Tools", - "ToolsTypedDict", - "TrainingFile", - "TrainingFileTypedDict", - "TranscriptionResponse", - "TranscriptionResponseTypedDict", - "TranscriptionSegmentChunk", - "TranscriptionSegmentChunkTypedDict", - "TranscriptionStreamDone", - "TranscriptionStreamDoneType", - "TranscriptionStreamDoneTypedDict", - "TranscriptionStreamEventTypes", - "TranscriptionStreamEvents", - "TranscriptionStreamEventsData", - 
"TranscriptionStreamEventsDataTypedDict", - "TranscriptionStreamEventsTypedDict", - "TranscriptionStreamLanguage", - "TranscriptionStreamLanguageType", - "TranscriptionStreamLanguageTypedDict", - "TranscriptionStreamSegmentDelta", - "TranscriptionStreamSegmentDeltaType", - "TranscriptionStreamSegmentDeltaTypedDict", - "TranscriptionStreamTextDelta", - "TranscriptionStreamTextDeltaType", - "TranscriptionStreamTextDeltaTypedDict", - "Two", - "TwoTypedDict", - "Type", - "UnarchiveFTModelOut", - "UnarchiveFTModelOutObject", - "UnarchiveFTModelOutTypedDict", - "UpdateFTModelIn", - "UpdateFTModelInTypedDict", - "UploadFileOut", - "UploadFileOutTypedDict", - "UsageInfo", - "UsageInfoTypedDict", - "UserMessage", - "UserMessageContent", - "UserMessageContentTypedDict", - "UserMessageRole", - "UserMessageTypedDict", - "ValidationError", - "ValidationErrorTypedDict", - "WandbIntegration", - "WandbIntegrationOut", - "WandbIntegrationOutType", - "WandbIntegrationOutTypedDict", - "WandbIntegrationType", - "WandbIntegrationTypedDict", - "WebSearchPremiumTool", - "WebSearchPremiumToolType", - "WebSearchPremiumToolTypedDict", - "WebSearchTool", - "WebSearchToolType", - "WebSearchToolTypedDict", -] - -_dynamic_imports: dict[str, str] = { - "Agent": ".agent", - "AgentObject": ".agent", - "AgentTools": ".agent", - "AgentToolsTypedDict": ".agent", - "AgentTypedDict": ".agent", - "AgentConversation": ".agentconversation", - "AgentConversationObject": ".agentconversation", - "AgentConversationTypedDict": ".agentconversation", - "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTools": ".agentcreationrequest", - "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", - "AgentCreationRequestTypedDict": ".agentcreationrequest", - "AgentHandoffDoneEvent": ".agenthandoffdoneevent", - "AgentHandoffDoneEventType": ".agenthandoffdoneevent", - "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", - "AgentHandoffEntry": ".agenthandoffentry", - 
"AgentHandoffEntryObject": ".agenthandoffentry", - "AgentHandoffEntryType": ".agenthandoffentry", - "AgentHandoffEntryTypedDict": ".agenthandoffentry", - "AgentHandoffStartedEvent": ".agenthandoffstartedevent", - "AgentHandoffStartedEventType": ".agenthandoffstartedevent", - "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", - "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", - "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", - "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", - 
"AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "ResponseBody": ".agents_api_v1_conversations_listop", - "ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", - "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", - "AgentsCompletionRequest": ".agentscompletionrequest", - "AgentsCompletionRequestMessages": ".agentscompletionrequest", - "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestStop": ".agentscompletionrequest", - "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", - "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", - "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", - "AgentUpdateRequest": ".agentupdaterequest", - 
"AgentUpdateRequestTools": ".agentupdaterequest", - "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", - "AgentUpdateRequestTypedDict": ".agentupdaterequest", - "APIEndpoint": ".apiendpoint", - "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutObject": ".archiveftmodelout", - "ArchiveFTModelOutTypedDict": ".archiveftmodelout", - "AssistantMessage": ".assistantmessage", - "AssistantMessageContent": ".assistantmessage", - "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", - "AssistantMessageTypedDict": ".assistantmessage", - "AudioChunk": ".audiochunk", - "AudioChunkType": ".audiochunk", - "AudioChunkTypedDict": ".audiochunk", - "AudioTranscriptionRequest": ".audiotranscriptionrequest", - "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", - "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", - "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", - "BaseModelCard": ".basemodelcard", - "BaseModelCardType": ".basemodelcard", - "BaseModelCardTypedDict": ".basemodelcard", - "BatchError": ".batcherror", - "BatchErrorTypedDict": ".batcherror", - "BatchJobIn": ".batchjobin", - "BatchJobInTypedDict": ".batchjobin", - "BatchJobOut": ".batchjobout", - "BatchJobOutObject": ".batchjobout", - "BatchJobOutTypedDict": ".batchjobout", - "BatchJobsOut": ".batchjobsout", - "BatchJobsOutObject": ".batchjobsout", - "BatchJobsOutTypedDict": ".batchjobsout", - "BatchJobStatus": ".batchjobstatus", - "BuiltInConnectors": ".builtinconnectors", - "ChatClassificationRequest": ".chatclassificationrequest", - "ChatClassificationRequestTypedDict": ".chatclassificationrequest", - "ChatCompletionChoice": ".chatcompletionchoice", - "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", - "FinishReason": ".chatcompletionchoice", - "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestToolChoice": ".chatcompletionrequest", - 
"ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", - "ChatCompletionRequestTypedDict": ".chatcompletionrequest", - "Messages": ".chatcompletionrequest", - "MessagesTypedDict": ".chatcompletionrequest", - "Stop": ".chatcompletionrequest", - "StopTypedDict": ".chatcompletionrequest", - "ChatCompletionResponse": ".chatcompletionresponse", - "ChatCompletionResponseTypedDict": ".chatcompletionresponse", - "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "ChatModerationRequest": ".chatmoderationrequest", - "ChatModerationRequestInputs": ".chatmoderationrequest", - "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", - "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "One": ".chatmoderationrequest", - "OneTypedDict": ".chatmoderationrequest", - "Two": ".chatmoderationrequest", - "TwoTypedDict": ".chatmoderationrequest", - "CheckpointOut": ".checkpointout", - "CheckpointOutTypedDict": ".checkpointout", - "ClassificationRequest": ".classificationrequest", - "ClassificationRequestInputs": ".classificationrequest", - "ClassificationRequestInputsTypedDict": ".classificationrequest", - "ClassificationRequestTypedDict": ".classificationrequest", - "ClassificationResponse": ".classificationresponse", - "ClassificationResponseTypedDict": ".classificationresponse", - "ClassificationTargetResult": ".classificationtargetresult", - "ClassificationTargetResultTypedDict": ".classificationtargetresult", 
- "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", - "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", - "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", - "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", - "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutModelType": ".classifierftmodelout", - "ClassifierFTModelOutObject": ".classifierftmodelout", - "ClassifierFTModelOutTypedDict": ".classifierftmodelout", - "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegrations": ".classifierjobout", - "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", - "ClassifierJobOutJobType": ".classifierjobout", - "ClassifierJobOutObject": ".classifierjobout", - "ClassifierJobOutStatus": ".classifierjobout", - "ClassifierJobOutTypedDict": ".classifierjobout", - "ClassifierTargetIn": ".classifiertargetin", - "ClassifierTargetInTypedDict": ".classifiertargetin", - "ClassifierTargetOut": ".classifiertargetout", - "ClassifierTargetOutTypedDict": ".classifiertargetout", - "ClassifierTrainingParameters": ".classifiertrainingparameters", - "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", - "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", - "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", - "CodeInterpreterTool": ".codeinterpretertool", - "CodeInterpreterToolType": ".codeinterpretertool", - "CodeInterpreterToolTypedDict": ".codeinterpretertool", - "CompletionArgs": ".completionargs", - "CompletionArgsTypedDict": ".completionargs", - "CompletionArgsStop": ".completionargsstop", - "CompletionArgsStopTypedDict": ".completionargsstop", - "CompletionChunk": ".completionchunk", - "CompletionChunkTypedDict": 
".completionchunk", - "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutJobType": ".completiondetailedjobout", - "CompletionDetailedJobOutObject": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutStatus": ".completiondetailedjobout", - "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", - "CompletionEvent": ".completionevent", - "CompletionEventTypedDict": ".completionevent", - "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutObject": ".completionftmodelout", - "CompletionFTModelOutTypedDict": ".completionftmodelout", - "ModelType": ".completionftmodelout", - "CompletionJobOut": ".completionjobout", - "CompletionJobOutObject": ".completionjobout", - "CompletionJobOutTypedDict": ".completionjobout", - "Integrations": ".completionjobout", - "IntegrationsTypedDict": ".completionjobout", - "JobType": ".completionjobout", - "Repositories": ".completionjobout", - "RepositoriesTypedDict": ".completionjobout", - "Status": ".completionjobout", - "CompletionResponseStreamChoice": ".completionresponsestreamchoice", - "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", - "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "CompletionTrainingParameters": ".completiontrainingparameters", - "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", - "CompletionTrainingParametersIn": ".completiontrainingparametersin", - "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", - "ContentChunk": ".contentchunk", - "ContentChunkTypedDict": ".contentchunk", - "ConversationAppendRequest": ".conversationappendrequest", - 
"ConversationAppendRequestHandoffExecution": ".conversationappendrequest", - "ConversationAppendRequestTypedDict": ".conversationappendrequest", - "ConversationAppendStreamRequest": ".conversationappendstreamrequest", - "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", - "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", - "ConversationEvents": ".conversationevents", - "ConversationEventsData": ".conversationevents", - "ConversationEventsDataTypedDict": ".conversationevents", - "ConversationEventsTypedDict": ".conversationevents", - "ConversationHistory": ".conversationhistory", - "ConversationHistoryObject": ".conversationhistory", - "ConversationHistoryTypedDict": ".conversationhistory", - "Entries": ".conversationhistory", - "EntriesTypedDict": ".conversationhistory", - "ConversationInputs": ".conversationinputs", - "ConversationInputsTypedDict": ".conversationinputs", - "ConversationMessages": ".conversationmessages", - "ConversationMessagesObject": ".conversationmessages", - "ConversationMessagesTypedDict": ".conversationmessages", - "ConversationRequest": ".conversationrequest", - "ConversationRequestTypedDict": ".conversationrequest", - "HandoffExecution": ".conversationrequest", - "Tools": ".conversationrequest", - "ToolsTypedDict": ".conversationrequest", - "ConversationResponse": ".conversationresponse", - "ConversationResponseObject": ".conversationresponse", - "ConversationResponseTypedDict": ".conversationresponse", - "Outputs": ".conversationresponse", - "OutputsTypedDict": ".conversationresponse", - "ConversationRestartRequest": ".conversationrestartrequest", - "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", - "ConversationRestartRequestTypedDict": ".conversationrestartrequest", - "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", - 
"ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", - "ConversationStreamRequest": ".conversationstreamrequest", - "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", - "ConversationStreamRequestTools": ".conversationstreamrequest", - "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", - "ConversationStreamRequestTypedDict": ".conversationstreamrequest", - "ConversationUsageInfo": ".conversationusageinfo", - "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", - "DeleteFileOut": ".deletefileout", - "DeleteFileOutTypedDict": ".deletefileout", - "DeleteModelOut": ".deletemodelout", - "DeleteModelOutTypedDict": ".deletemodelout", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", - "DeltaMessage": ".deltamessage", - "DeltaMessageTypedDict": ".deltamessage", - "DocumentLibraryTool": ".documentlibrarytool", - "DocumentLibraryToolType": ".documentlibrarytool", - "DocumentLibraryToolTypedDict": ".documentlibrarytool", - "DocumentOut": ".documentout", - "DocumentOutTypedDict": ".documentout", - "DocumentTextContent": ".documenttextcontent", - "DocumentTextContentTypedDict": ".documenttextcontent", - "DocumentUpdateIn": ".documentupdatein", - "DocumentUpdateInTypedDict": ".documentupdatein", - "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", - "DocumentURLChunkTypedDict": ".documenturlchunk", - "EmbeddingDtype": ".embeddingdtype", - "EmbeddingRequest": ".embeddingrequest", - "EmbeddingRequestInputs": ".embeddingrequest", - "EmbeddingRequestInputsTypedDict": ".embeddingrequest", - "EmbeddingRequestTypedDict": ".embeddingrequest", - "EmbeddingResponse": ".embeddingresponse", - "EmbeddingResponseTypedDict": ".embeddingresponse", - "EmbeddingResponseData": 
".embeddingresponsedata", - "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", - "EntityType": ".entitytype", - "EventOut": ".eventout", - "EventOutTypedDict": ".eventout", - "File": ".file", - "FileTypedDict": ".file", - "FileChunk": ".filechunk", - "FileChunkTypedDict": ".filechunk", - "FilePurpose": ".filepurpose", - "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", - "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", - "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", - "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", - "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", - "FileSchema": ".fileschema", - "FileSchemaTypedDict": ".fileschema", - "FileSignedURL": ".filesignedurl", - "FileSignedURLTypedDict": ".filesignedurl", - "FIMCompletionRequest": ".fimcompletionrequest", - "FIMCompletionRequestStop": ".fimcompletionrequest", - "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", - "FIMCompletionRequestTypedDict": ".fimcompletionrequest", - "FIMCompletionResponse": ".fimcompletionresponse", - "FIMCompletionResponseTypedDict": ".fimcompletionresponse", - "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", - 
"FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", - "FineTuneableModelType": ".finetuneablemodeltype", - "FTClassifierLossFunction": ".ftclassifierlossfunction", - "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", - "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", - "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", - "FTModelCardTypedDict": ".ftmodelcard", - "Function": ".function", - "FunctionTypedDict": ".function", - "Arguments": ".functioncall", - "ArgumentsTypedDict": ".functioncall", - "FunctionCall": ".functioncall", - "FunctionCallTypedDict": ".functioncall", - "FunctionCallEntry": ".functioncallentry", - "FunctionCallEntryObject": ".functioncallentry", - "FunctionCallEntryType": ".functioncallentry", - "FunctionCallEntryTypedDict": ".functioncallentry", - "FunctionCallEntryArguments": ".functioncallentryarguments", - "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", - "FunctionCallEvent": ".functioncallevent", - "FunctionCallEventType": ".functioncallevent", - "FunctionCallEventTypedDict": ".functioncallevent", - "FunctionName": ".functionname", - "FunctionNameTypedDict": ".functionname", - "FunctionResultEntry": ".functionresultentry", - "FunctionResultEntryObject": ".functionresultentry", - "FunctionResultEntryType": ".functionresultentry", - "FunctionResultEntryTypedDict": ".functionresultentry", - "FunctionTool": ".functiontool", - "FunctionToolType": ".functiontool", - "FunctionToolTypedDict": ".functiontool", - "GithubRepositoryIn": ".githubrepositoryin", - "GithubRepositoryInType": ".githubrepositoryin", - "GithubRepositoryInTypedDict": ".githubrepositoryin", - "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutType": ".githubrepositoryout", - "GithubRepositoryOutTypedDict": ".githubrepositoryout", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", - "ImageGenerationTool": ".imagegenerationtool", - 
"ImageGenerationToolType": ".imagegenerationtool", - "ImageGenerationToolTypedDict": ".imagegenerationtool", - "ImageURL": ".imageurl", - "ImageURLTypedDict": ".imageurl", - "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", - "ImageURLChunkTypedDict": ".imageurlchunk", - "InputEntries": ".inputentries", - "InputEntriesTypedDict": ".inputentries", - "Inputs": ".inputs", - "InputsTypedDict": ".inputs", - "InstructRequestInputs": ".inputs", - "InstructRequestInputsMessages": ".inputs", - "InstructRequestInputsMessagesTypedDict": ".inputs", - "InstructRequestInputsTypedDict": ".inputs", - "InstructRequest": ".instructrequest", - "InstructRequestMessages": ".instructrequest", - "InstructRequestMessagesTypedDict": ".instructrequest", - "InstructRequestTypedDict": ".instructrequest", - "Hyperparameters": ".jobin", - "HyperparametersTypedDict": ".jobin", - "JobIn": ".jobin", - "JobInIntegrations": ".jobin", - "JobInIntegrationsTypedDict": ".jobin", - "JobInRepositories": ".jobin", - "JobInRepositoriesTypedDict": ".jobin", - "JobInTypedDict": ".jobin", - "JobMetadataOut": ".jobmetadataout", - "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - 
"JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - 
"JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsOut": ".jobsout", - "JobsOutData": ".jobsout", - "JobsOutDataTypedDict": ".jobsout", - "JobsOutObject": ".jobsout", - "JobsOutTypedDict": ".jobsout", - "JSONSchema": ".jsonschema", - "JSONSchemaTypedDict": ".jsonschema", - "LegacyJobMetadataOut": ".legacyjobmetadataout", - "LegacyJobMetadataOutObject": ".legacyjobmetadataout", - "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibrariesDeleteV1Request": ".libraries_delete_v1op", - "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", - "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", - "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", - 
"LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", - "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", - "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", - "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", - "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", - "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", - "LibrariesGetV1Request": ".libraries_get_v1op", - "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", - "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", - "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", - "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", - "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", - "LibrariesShareListV1Request": ".libraries_share_list_v1op", - "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", - "LibrariesUpdateV1Request": 
".libraries_update_v1op", - "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", - "LibraryIn": ".libraryin", - "LibraryInTypedDict": ".libraryin", - "LibraryInUpdate": ".libraryinupdate", - "LibraryInUpdateTypedDict": ".libraryinupdate", - "LibraryOut": ".libraryout", - "LibraryOutTypedDict": ".libraryout", - "ListDocumentOut": ".listdocumentout", - "ListDocumentOutTypedDict": ".listdocumentout", - "ListFilesOut": ".listfilesout", - "ListFilesOutTypedDict": ".listfilesout", - "ListLibraryOut": ".listlibraryout", - "ListLibraryOutTypedDict": ".listlibraryout", - "ListSharingOut": ".listsharingout", - "ListSharingOutTypedDict": ".listsharingout", - "MessageEntries": ".messageentries", - "MessageEntriesTypedDict": ".messageentries", - "MessageInputContentChunks": ".messageinputcontentchunks", - "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", - "MessageInputEntry": ".messageinputentry", - "MessageInputEntryContent": ".messageinputentry", - "MessageInputEntryContentTypedDict": ".messageinputentry", - "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", - "MessageInputEntryTypedDict": ".messageinputentry", - "Object": ".messageinputentry", - "MessageOutputContentChunks": ".messageoutputcontentchunks", - "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", - "MessageOutputEntry": ".messageoutputentry", - "MessageOutputEntryContent": ".messageoutputentry", - "MessageOutputEntryContentTypedDict": ".messageoutputentry", - "MessageOutputEntryObject": ".messageoutputentry", - "MessageOutputEntryRole": ".messageoutputentry", - "MessageOutputEntryType": ".messageoutputentry", - "MessageOutputEntryTypedDict": ".messageoutputentry", - "MessageOutputEvent": ".messageoutputevent", - "MessageOutputEventContent": ".messageoutputevent", - "MessageOutputEventContentTypedDict": ".messageoutputevent", - "MessageOutputEventRole": ".messageoutputevent", - "MessageOutputEventType": 
".messageoutputevent", - "MessageOutputEventTypedDict": ".messageoutputevent", - "MetricOut": ".metricout", - "MetricOutTypedDict": ".metricout", - "MistralPromptMode": ".mistralpromptmode", - "ModelCapabilities": ".modelcapabilities", - "ModelCapabilitiesTypedDict": ".modelcapabilities", - "ModelConversation": ".modelconversation", - "ModelConversationObject": ".modelconversation", - "ModelConversationTools": ".modelconversation", - "ModelConversationToolsTypedDict": ".modelconversation", - "ModelConversationTypedDict": ".modelconversation", - "Data": ".modellist", - "DataTypedDict": ".modellist", - "ModelList": ".modellist", - "ModelListTypedDict": ".modellist", - "ModerationObject": ".moderationobject", - "ModerationObjectTypedDict": ".moderationobject", - "ModerationResponse": ".moderationresponse", - "ModerationResponseTypedDict": ".moderationresponse", - "OCRImageObject": ".ocrimageobject", - "OCRImageObjectTypedDict": ".ocrimageobject", - "OCRPageDimensions": ".ocrpagedimensions", - "OCRPageDimensionsTypedDict": ".ocrpagedimensions", - "OCRPageObject": ".ocrpageobject", - "OCRPageObjectTypedDict": ".ocrpageobject", - "Document": ".ocrrequest", - "DocumentTypedDict": ".ocrrequest", - "OCRRequest": ".ocrrequest", - "OCRRequestTypedDict": ".ocrrequest", - "OCRResponse": ".ocrresponse", - "OCRResponseTypedDict": ".ocrresponse", - "OCRUsageInfo": ".ocrusageinfo", - "OCRUsageInfoTypedDict": ".ocrusageinfo", - "OutputContentChunks": ".outputcontentchunks", - "OutputContentChunksTypedDict": ".outputcontentchunks", - "PaginationInfo": ".paginationinfo", - "PaginationInfoTypedDict": ".paginationinfo", - "Prediction": ".prediction", - "PredictionTypedDict": ".prediction", - "ProcessingStatusOut": ".processingstatusout", - "ProcessingStatusOutTypedDict": ".processingstatusout", - "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", - "ReferenceChunkTypedDict": ".referencechunk", - "ResponseDoneEvent": ".responsedoneevent", - 
"ResponseDoneEventType": ".responsedoneevent", - "ResponseDoneEventTypedDict": ".responsedoneevent", - "ResponseErrorEvent": ".responseerrorevent", - "ResponseErrorEventType": ".responseerrorevent", - "ResponseErrorEventTypedDict": ".responseerrorevent", - "ResponseFormat": ".responseformat", - "ResponseFormatTypedDict": ".responseformat", - "ResponseFormats": ".responseformats", - "ResponseStartedEvent": ".responsestartedevent", - "ResponseStartedEventType": ".responsestartedevent", - "ResponseStartedEventTypedDict": ".responsestartedevent", - "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveFileOut": ".retrievefileout", - "RetrieveFileOutTypedDict": ".retrievefileout", - "SampleType": ".sampletype", - "SDKError": ".sdkerror", - "Security": ".security", - "SecurityTypedDict": ".security", - "ShareEnum": ".shareenum", - "SharingDelete": ".sharingdelete", - "SharingDeleteTypedDict": ".sharingdelete", - "SharingIn": ".sharingin", - "SharingInTypedDict": ".sharingin", - "SharingOut": ".sharingout", - "SharingOutTypedDict": ".sharingout", - "Source": ".source", - "SSETypes": ".ssetypes", - "Role": ".systemmessage", - "SystemMessage": ".systemmessage", - "SystemMessageContent": ".systemmessage", - "SystemMessageContentTypedDict": ".systemmessage", - "SystemMessageTypedDict": ".systemmessage", - "TextChunk": ".textchunk", - "TextChunkType": ".textchunk", - "TextChunkTypedDict": ".textchunk", - "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", - "ThinkChunkTypedDict": ".thinkchunk", - "Thinking": ".thinkchunk", - "ThinkingTypedDict": ".thinkchunk", - 
"TimestampGranularity": ".timestampgranularity", - "Tool": ".tool", - "ToolTypedDict": ".tool", - "Metadata": ".toolcall", - "MetadataTypedDict": ".toolcall", - "ToolCall": ".toolcall", - "ToolCallTypedDict": ".toolcall", - "ToolChoice": ".toolchoice", - "ToolChoiceTypedDict": ".toolchoice", - "ToolChoiceEnum": ".toolchoiceenum", - "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDoneEvent": ".toolexecutiondoneevent", - "ToolExecutionDoneEventType": ".toolexecutiondoneevent", - "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", - "ToolExecutionEntry": ".toolexecutionentry", - "ToolExecutionEntryObject": ".toolexecutionentry", - "ToolExecutionEntryType": ".toolexecutionentry", - "ToolExecutionEntryTypedDict": ".toolexecutionentry", - "ToolExecutionStartedEvent": ".toolexecutionstartedevent", - "ToolExecutionStartedEventType": ".toolexecutionstartedevent", - "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", - "ToolFileChunk": ".toolfilechunk", - "ToolFileChunkType": ".toolfilechunk", - "ToolFileChunkTypedDict": ".toolfilechunk", - "ToolMessage": ".toolmessage", - "ToolMessageContent": ".toolmessage", - "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", - "ToolMessageTypedDict": ".toolmessage", - "ToolReferenceChunk": ".toolreferencechunk", - "ToolReferenceChunkType": ".toolreferencechunk", - "ToolReferenceChunkTypedDict": ".toolreferencechunk", - "ToolTypes": ".tooltypes", - "TrainingFile": ".trainingfile", - "TrainingFileTypedDict": ".trainingfile", - "TranscriptionResponse": ".transcriptionresponse", - "TranscriptionResponseTypedDict": ".transcriptionresponse", - "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", - "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", - "Type": ".transcriptionsegmentchunk", - "TranscriptionStreamDone": 
".transcriptionstreamdone", - "TranscriptionStreamDoneType": ".transcriptionstreamdone", - "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", - "TranscriptionStreamEvents": ".transcriptionstreamevents", - "TranscriptionStreamEventsData": ".transcriptionstreamevents", - "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", - "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", - "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", - "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", - "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", - "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutObject": ".unarchiveftmodelout", - "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", - "UpdateFTModelIn": ".updateftmodelin", - "UpdateFTModelInTypedDict": ".updateftmodelin", - "UploadFileOut": ".uploadfileout", - "UploadFileOutTypedDict": ".uploadfileout", - "UsageInfo": ".usageinfo", - "UsageInfoTypedDict": ".usageinfo", - "UserMessage": ".usermessage", - "UserMessageContent": ".usermessage", - "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", - "UserMessageTypedDict": ".usermessage", - "Loc": ".validationerror", - "LocTypedDict": ".validationerror", - "ValidationError": ".validationerror", - "ValidationErrorTypedDict": ".validationerror", - "WandbIntegration": ".wandbintegration", - "WandbIntegrationType": ".wandbintegration", 
- "WandbIntegrationTypedDict": ".wandbintegration", - "WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutType": ".wandbintegrationout", - "WandbIntegrationOutTypedDict": ".wandbintegrationout", - "WebSearchPremiumTool": ".websearchpremiumtool", - "WebSearchPremiumToolType": ".websearchpremiumtool", - "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", - "WebSearchTool": ".websearchtool", - "WebSearchToolType": ".websearchtool", - "WebSearchToolTypedDict": ".websearchtool", -} - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = import_module(module_name, __package__) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e - - -def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py deleted file mode 100644 index b6bf17ab..00000000 --- a/src/mistralai/models/agent.py +++ /dev/null @@ -1,129 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentToolsTypedDict = TypeAliasType( - "AgentToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -AgentObject = Literal["agent"] - - -class AgentTypedDict(TypedDict): - model: str - name: str - id: str - version: int - created_at: datetime - updated_at: datetime - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentToolsTypedDict]] - 
r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - object: NotRequired[AgentObject] - - -class Agent(BaseModel): - model: str - - name: str - - id: str - - version: int - - created_at: datetime - - updated_at: datetime - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - object: Optional[AgentObject] = "agent" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "object", - ] - nullable_fields = ["instructions", "description", "handoffs"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py deleted file mode 100644 index 42ab84f5..00000000 --- a/src/mistralai/models/agentconversation.py +++ /dev/null @@ -1,71 
+0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentConversationObject = Literal["conversation"] - - -class AgentConversationTypedDict(TypedDict): - id: str - created_at: datetime - updated_at: datetime - agent_id: str - name: NotRequired[Nullable[str]] - r"""Name given to the conversation.""" - description: NotRequired[Nullable[str]] - r"""Description of the what the conversation is about.""" - object: NotRequired[AgentConversationObject] - - -class AgentConversation(BaseModel): - id: str - - created_at: datetime - - updated_at: datetime - - agent_id: str - - name: OptionalNullable[str] = UNSET - r"""Name given to the conversation.""" - - description: OptionalNullable[str] = UNSET - r"""Description of the what the conversation is about.""" - - object: Optional[AgentConversationObject] = "conversation" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description", "object"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentcreationrequest.py 
b/src/mistralai/models/agentcreationrequest.py deleted file mode 100644 index 83a27028..00000000 --- a/src/mistralai/models/agentcreationrequest.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentCreationRequestToolsTypedDict = TypeAliasType( - "AgentCreationRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentCreationRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class AgentCreationRequestTypedDict(TypedDict): - 
model: str - name: str - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - - -class AgentCreationRequest(BaseModel): - model: str - - name: str - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentCreationRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - ] - nullable_fields = ["instructions", "description", "handoffs"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py deleted file mode 100644 index fa545a02..00000000 
--- a/src/mistralai/models/agenthandoffdoneevent.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffDoneEventType = Literal["agent.handoff.done"] - - -class AgentHandoffDoneEventTypedDict(TypedDict): - id: str - next_agent_id: str - next_agent_name: str - type: NotRequired[AgentHandoffDoneEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class AgentHandoffDoneEvent(BaseModel): - id: str - - next_agent_id: str - - next_agent_name: str - - type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py deleted file mode 100644 index 44bfe0f2..00000000 --- a/src/mistralai/models/agenthandoffentry.py +++ /dev/null @@ -1,75 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffEntryObject = Literal["entry"] - -AgentHandoffEntryType = Literal["agent.handoff"] - - -class AgentHandoffEntryTypedDict(TypedDict): - previous_agent_id: str - previous_agent_name: str - next_agent_id: str - next_agent_name: str - object: NotRequired[AgentHandoffEntryObject] - type: NotRequired[AgentHandoffEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class AgentHandoffEntry(BaseModel): - previous_agent_id: str - - previous_agent_name: str - - next_agent_id: str - - next_agent_name: str - - object: Optional[AgentHandoffEntryObject] = "entry" - - type: Optional[AgentHandoffEntryType] = "agent.handoff" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agenthandoffstartedevent.py 
b/src/mistralai/models/agenthandoffstartedevent.py deleted file mode 100644 index 9033a0a9..00000000 --- a/src/mistralai/models/agenthandoffstartedevent.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffStartedEventType = Literal["agent.handoff.started"] - - -class AgentHandoffStartedEventTypedDict(TypedDict): - id: str - previous_agent_id: str - previous_agent_name: str - type: NotRequired[AgentHandoffStartedEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class AgentHandoffStartedEvent(BaseModel): - id: str - - previous_agent_id: str - - previous_agent_name: str - - type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py deleted file mode 100644 index 5dbcecc1..00000000 --- a/src/mistralai/models/agents_api_v1_agents_getop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): - agent_id: str - - -class AgentsAPIV1AgentsGetRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py deleted file mode 100644 index 25f48a62..00000000 --- a/src/mistralai/models/agents_api_v1_agents_listop.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, QueryParamMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - - -class AgentsAPIV1AgentsListRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 20 diff --git a/src/mistralai/models/agents_api_v1_agents_updateop.py b/src/mistralai/models/agents_api_v1_agents_updateop.py deleted file mode 100644 index 32696fbe..00000000 --- a/src/mistralai/models/agents_api_v1_agents_updateop.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): - agent_id: str - agent_update_request: AgentUpdateRequestTypedDict - - -class AgentsAPIV1AgentsUpdateRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - agent_update_request: Annotated[ - AgentUpdateRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py deleted file mode 100644 index a37a61ba..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_getop.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agentconversation import AgentConversation, AgentConversationTypedDict -from .modelconversation import ModelConversation, ModelConversationTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation from which we are fetching metadata.""" - - -class AgentsAPIV1ConversationsGetRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation from which we are fetching metadata.""" - - -AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", - Union[AgentConversationTypedDict, ModelConversationTypedDict], -) -r"""Successful Response""" - - -AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - Union[AgentConversation, ModelConversation], -) -r"""Successful Response""" diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py deleted file mode 100644 index f1d3d579..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_listop.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agentconversation import AgentConversation, AgentConversationTypedDict -from .modelconversation import ModelConversation, ModelConversationTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, QueryParamMetadata -from typing import Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - - -class AgentsAPIV1ConversationsListRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - -ResponseBodyTypedDict = TypeAliasType( - "ResponseBodyTypedDict", - Union[AgentConversationTypedDict, ModelConversationTypedDict], -) - - -ResponseBody = TypeAliasType( - "ResponseBody", Union[AgentConversation, ModelConversation] -) diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py deleted file mode 100644 index c832edfd..00000000 --- a/src/mistralai/models/agentscompletionrequest.py +++ /dev/null @@ -1,180 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum -from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentsCompletionRequestStopTypedDict = TypeAliasType( - "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestStop = TypeAliasType( - "AgentsCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -AgentsCompletionRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( - "AgentsCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -AgentsCompletionRequestToolChoice = TypeAliasType( - "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class AgentsCompletionRequestTypedDict(TypedDict): - messages: List[AgentsCompletionRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - agent_id: str - r"""The ID of the agent to use for this completion.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[AgentsCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - -class AgentsCompletionRequest(BaseModel): - messages: List[AgentsCompletionRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - agent_id: str - r"""The ID of the agent to use for this completion.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[AgentsCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[AgentsCompletionRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py deleted file mode 100644 index 6e619b77..00000000 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum -from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestStop = TypeAliasType( - "AgentsCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -AgentsCompletionStreamRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -AgentsCompletionStreamRequestToolChoice = TypeAliasType( - "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class AgentsCompletionStreamRequestTypedDict(TypedDict): - messages: List[AgentsCompletionStreamRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - agent_id: str - r"""The ID of the agent to use for this completion.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - -class AgentsCompletionStreamRequest(BaseModel): - messages: List[AgentsCompletionStreamRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - agent_id: str - r"""The ID of the agent to use for this completion.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[AgentsCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py deleted file mode 100644 index f6fcb27a..00000000 --- a/src/mistralai/models/agentupdaterequest.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentUpdateRequestToolsTypedDict = TypeAliasType( - "AgentUpdateRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentUpdateRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class AgentUpdateRequestTypedDict(TypedDict): - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: 
NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - model: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - - -class AgentUpdateRequest(BaseModel): - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentUpdateRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - model: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "model", - "name", - "description", - "handoffs", - ] - nullable_fields = ["instructions", "model", "name", "description", "handoffs"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py deleted file mode 100644 index a1b42e88..00000000 --- a/src/mistralai/models/apiendpoint.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -APIEndpoint = Union[ - Literal[ - "/v1/chat/completions", - "/v1/embeddings", - "/v1/fim/completions", - "/v1/moderations", - "/v1/chat/moderations", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py deleted file mode 100644 index cff27c4e..00000000 --- a/src/mistralai/models/archiveftmodelout.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ArchiveFTModelOutObject = Literal["model"] - - -class ArchiveFTModelOutTypedDict(TypedDict): - id: str - object: NotRequired[ArchiveFTModelOutObject] - archived: NotRequired[bool] - - -class ArchiveFTModelOut(BaseModel): - id: str - - object: Optional[ArchiveFTModelOutObject] = "model" - - archived: Optional[bool] = True diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py deleted file mode 100644 index 18841a72..00000000 --- a/src/mistralai/models/assistantmessage.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AssistantMessageContentTypedDict = TypeAliasType( - "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -AssistantMessageContent = TypeAliasType( - "AssistantMessageContent", Union[str, List[ContentChunk]] -) - - -AssistantMessageRole = Literal["assistant"] - - -class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[AssistantMessageContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] - - -class AssistantMessage(BaseModel): - content: OptionalNullable[AssistantMessageContent] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - - role: Optional[AssistantMessageRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py deleted file mode 100644 index 2780570a..00000000 --- a/src/mistralai/models/audiochunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AudioChunkType = Literal["input_audio"] - - -class AudioChunkTypedDict(TypedDict): - input_audio: str - type: NotRequired[AudioChunkType] - - -class AudioChunk(BaseModel): - input_audio: str - - type: Optional[AudioChunkType] = "input_audio" diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py deleted file mode 100644 index 371d3ecc..00000000 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .timestampgranularity import TimestampGranularity -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AudioTranscriptionRequestTypedDict(TypedDict): - model: str - file: NotRequired[FileTypedDict] - file_url: NotRequired[Nullable[str]] - r"""Url of a file to be transcribed""" - file_id: NotRequired[Nullable[str]] - r"""ID of a file uploaded to /v1/files""" - language: NotRequired[Nullable[str]] - r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" - temperature: NotRequired[Nullable[float]] - stream: Literal[False] - timestamp_granularities: NotRequired[List[TimestampGranularity]] - r"""Granularities of timestamps to include in the response.""" - - -class AudioTranscriptionRequest(BaseModel): - model: Annotated[str, FieldMetadata(multipart=True)] - - file: Annotated[ - Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) - ] = None - - file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Url of a file to be transcribed""" - - file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""ID of a file uploaded to /v1/files""" - - language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" - - temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( - UNSET - ) - - STREAM: Annotated[ - Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], - pydantic.Field(alias="stream"), - FieldMetadata(multipart=True), - ] = False - - timestamp_granularities: Annotated[ - Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) - ] = None - r"""Granularities of timestamps to include in the response.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: 
disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/audiotranscriptionrequeststream.py b/src/mistralai/models/audiotranscriptionrequeststream.py deleted file mode 100644 index 04374503..00000000 --- a/src/mistralai/models/audiotranscriptionrequeststream.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .timestampgranularity import TimestampGranularity -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AudioTranscriptionRequestStreamTypedDict(TypedDict): - model: str - file: NotRequired[FileTypedDict] - file_url: NotRequired[Nullable[str]] - r"""Url of a file to be transcribed""" - file_id: NotRequired[Nullable[str]] - r"""ID of a file uploaded to /v1/files""" - language: NotRequired[Nullable[str]] - r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" - temperature: NotRequired[Nullable[float]] - stream: Literal[True] - timestamp_granularities: NotRequired[List[TimestampGranularity]] - r"""Granularities of timestamps to include in the response.""" - - -class AudioTranscriptionRequestStream(BaseModel): - model: Annotated[str, FieldMetadata(multipart=True)] - - file: Annotated[ - Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) - ] = None - - file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Url of a file to be transcribed""" - - file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""ID of a file uploaded to /v1/files""" - - language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" - - temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( - UNSET - ) - - STREAM: Annotated[ - Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], - pydantic.Field(alias="stream"), - FieldMetadata(multipart=True), - ] = True - - timestamp_granularities: Annotated[ - Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) - ] = None - r"""Granularities of timestamps to include in the response.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: 
disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py deleted file mode 100644 index a4a061ff..00000000 --- a/src/mistralai/models/basemodelcard.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -BaseModelCardType = Literal["base"] - - -class BaseModelCardTypedDict(TypedDict): - id: str - capabilities: ModelCapabilitiesTypedDict - object: NotRequired[str] - created: NotRequired[int] - owned_by: NotRequired[str] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - deprecation: NotRequired[Nullable[datetime]] - deprecation_replacement_model: NotRequired[Nullable[str]] - default_model_temperature: NotRequired[Nullable[float]] - type: BaseModelCardType - - -class BaseModelCard(BaseModel): - id: str - - capabilities: ModelCapabilities - - object: Optional[str] = "model" - - created: Optional[int] = None - - owned_by: Optional[str] = "mistralai" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - deprecation: 
OptionalNullable[datetime] = UNSET - - deprecation_replacement_model: OptionalNullable[str] = UNSET - - default_model_temperature: OptionalNullable[float] = UNSET - - TYPE: Annotated[ - Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], - pydantic.Field(alias="type"), - ] = "base" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "type", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batcherror.py b/src/mistralai/models/batcherror.py deleted file mode 100644 index 4f823446..00000000 --- a/src/mistralai/models/batcherror.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class BatchErrorTypedDict(TypedDict): - message: str - count: NotRequired[int] - - -class BatchError(BaseModel): - message: str - - count: Optional[int] = 1 diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py deleted file mode 100644 index aa0bb5be..00000000 --- a/src/mistralai/models/batchjobin.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .apiendpoint import APIEndpoint -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing import Dict, List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class BatchJobInTypedDict(TypedDict): - input_files: List[str] - endpoint: APIEndpoint - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, str]]] - timeout_hours: NotRequired[int] - - -class BatchJobIn(BaseModel): - input_files: List[str] - - endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - metadata: OptionalNullable[Dict[str, str]] = UNSET - - timeout_hours: Optional[int] = 24 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["model", "agent_id", "metadata", "timeout_hours"] - nullable_fields = ["model", "agent_id", "metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = 
serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py deleted file mode 100644 index 88304313..00000000 --- a/src/mistralai/models/batchjobout.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .batcherror import BatchError, BatchErrorTypedDict -from .batchjobstatus import BatchJobStatus -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobOutObject = Literal["batch"] - - -class BatchJobOutTypedDict(TypedDict): - id: str - input_files: List[str] - endpoint: str - errors: List[BatchErrorTypedDict] - status: BatchJobStatus - created_at: int - total_requests: int - completed_requests: int - succeeded_requests: int - failed_requests: int - object: NotRequired[BatchJobOutObject] - metadata: NotRequired[Nullable[Dict[str, Any]]] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - output_file: NotRequired[Nullable[str]] - error_file: NotRequired[Nullable[str]] - started_at: NotRequired[Nullable[int]] - completed_at: NotRequired[Nullable[int]] - - -class BatchJobOut(BaseModel): - id: str - - input_files: List[str] - - endpoint: str - - errors: List[BatchError] - - status: BatchJobStatus - - created_at: int - - total_requests: int - - completed_requests: int - - 
succeeded_requests: int - - failed_requests: int - - object: Optional[BatchJobOutObject] = "batch" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - output_file: OptionalNullable[str] = UNSET - - error_file: OptionalNullable[str] = UNSET - - started_at: OptionalNullable[int] = UNSET - - completed_at: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "started_at", - "completed_at", - ] - nullable_fields = [ - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "started_at", - "completed_at", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py deleted file mode 100644 index 8ce26f31..00000000 --- a/src/mistralai/models/batchjobsout.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .batchjobout import BatchJobOut, BatchJobOutTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobsOutObject = Literal["list"] - - -class BatchJobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[BatchJobOutTypedDict]] - object: NotRequired[BatchJobsOutObject] - - -class BatchJobsOut(BaseModel): - total: int - - data: Optional[List[BatchJobOut]] = None - - object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/models/batchjobstatus.py b/src/mistralai/models/batchjobstatus.py deleted file mode 100644 index 4b28059b..00000000 --- a/src/mistralai/models/batchjobstatus.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -BatchJobStatus = Literal[ - "QUEUED", - "RUNNING", - "SUCCESS", - "FAILED", - "TIMEOUT_EXCEEDED", - "CANCELLATION_REQUESTED", - "CANCELLED", -] diff --git a/src/mistralai/models/builtinconnectors.py b/src/mistralai/models/builtinconnectors.py deleted file mode 100644 index 6a3b2476..00000000 --- a/src/mistralai/models/builtinconnectors.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -BuiltInConnectors = Literal[ - "web_search", - "web_search_premium", - "code_interpreter", - "image_generation", - "document_library", -] diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py deleted file mode 100644 index f06f4f34..00000000 --- a/src/mistralai/models/chatclassificationrequest.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .inputs import Inputs, InputsTypedDict -from mistralai.types import BaseModel -import pydantic -from typing_extensions import Annotated, TypedDict - - -class ChatClassificationRequestTypedDict(TypedDict): - model: str - inputs: InputsTypedDict - r"""Chat to classify""" - - -class ChatClassificationRequest(BaseModel): - model: str - - inputs: Annotated[Inputs, pydantic.Field(alias="input")] - r"""Chat to classify""" diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py deleted file mode 100644 index f4f37fb4..00000000 --- a/src/mistralai/models/chatcompletionchoice.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai.types import BaseModel, UnrecognizedStr -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Literal, Union -from typing_extensions import Annotated, TypedDict - - -FinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr -] - - -class ChatCompletionChoiceTypedDict(TypedDict): - index: int - message: AssistantMessageTypedDict - finish_reason: FinishReason - - -class ChatCompletionChoice(BaseModel): - index: int - - message: AssistantMessage - - finish_reason: Annotated[FinishReason, PlainValidator(validate_open_enum(False))] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py deleted file mode 100644 index 6f195f13..00000000 --- a/src/mistralai/models/chatcompletionrequest.py +++ /dev/null @@ -1,201 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum -from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = TypeAliasType("Stop", Union[str, List[str]]) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Messages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -ChatCompletionRequestToolChoice = TypeAliasType( - "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class ChatCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[StopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[Messages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[Stop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[ChatCompletionRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py deleted file mode 100644 index 3d03b126..00000000 --- a/src/mistralai/models/chatcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ChatCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: int - choices: List[ChatCompletionChoiceTypedDict] - - -class ChatCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: int - - choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py deleted file mode 100644 index 0fa102e5..00000000 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ /dev/null @@ -1,203 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum -from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ChatCompletionStreamRequestStopTypedDict = TypeAliasType( - "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestStop = TypeAliasType( - "ChatCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionStreamRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -ChatCompletionStreamRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -ChatCompletionStreamRequestToolChoice = TypeAliasType( - "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class ChatCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - response_format: NotRequired[ResponseFormatTypedDict] - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[ChatCompletionStreamRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[ChatCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" - - response_format: Optional[ResponseFormat] = None - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py deleted file mode 100644 index 2f58d52f..00000000 --- a/src/mistralai/models/chatmoderationrequest.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -import pydantic -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TwoTypedDict = TypeAliasType( - "TwoTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Two = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -OneTypedDict = TypeAliasType( - "OneTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -One = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatModerationRequestInputsTypedDict = TypeAliasType( - "ChatModerationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], -) -r"""Chat to classify""" - - -ChatModerationRequestInputs = TypeAliasType( - "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] -) -r"""Chat to classify""" - - -class ChatModerationRequestTypedDict(TypedDict): - inputs: ChatModerationRequestInputsTypedDict - r"""Chat to classify""" - model: str - - -class ChatModerationRequest(BaseModel): - inputs: 
Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] - r"""Chat to classify""" - - model: str diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py deleted file mode 100644 index aefb7731..00000000 --- a/src/mistralai/models/checkpointout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .metricout import MetricOut, MetricOutTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class CheckpointOutTypedDict(TypedDict): - metrics: MetricOutTypedDict - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - step_number: int - r"""The step number that the checkpoint was created at.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - - -class CheckpointOut(BaseModel): - metrics: MetricOut - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - step_number: int - r"""The step number that the checkpoint was created at.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py deleted file mode 100644 index 39e25390..00000000 --- a/src/mistralai/models/classificationrequest.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -ClassificationRequestInputsTypedDict = TypeAliasType( - "ClassificationRequestInputsTypedDict", Union[str, List[str]] -) -r"""Text to classify.""" - - -ClassificationRequestInputs = TypeAliasType( - "ClassificationRequestInputs", Union[str, List[str]] -) -r"""Text to classify.""" - - -class ClassificationRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use.""" - inputs: ClassificationRequestInputsTypedDict - r"""Text to classify.""" - - -class ClassificationRequest(BaseModel): - model: str - r"""ID of the model to use.""" - - inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] - r"""Text to classify.""" diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py deleted file mode 100644 index da5bd281..00000000 --- a/src/mistralai/models/classifierdetailedjobout.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] - -ClassifierDetailedJobOutObject = Literal["job"] - -ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -ClassifierDetailedJobOutIntegrations = WandbIntegrationOut - - -ClassifierDetailedJobOutJobType = Literal["classifier"] - - -class ClassifierDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: ClassifierDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: ClassifierTrainingParametersTypedDict - classifier_targets: List[ClassifierTargetOutTypedDict] - validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[ClassifierDetailedJobOutObject] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: 
NotRequired[ClassifierDetailedJobOutJobType] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class ClassifierDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: ClassifierDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: ClassifierTrainingParameters - - classifier_targets: List[ClassifierTargetOut] - - validation_files: OptionalNullable[List[str]] = UNSET - - object: Optional[ClassifierDetailedJobOutObject] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py deleted file mode 100644 index 56ffe96d..00000000 --- a/src/mistralai/models/classifierftmodelout.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierFTModelOutObject = Literal["model"] - -ClassifierFTModelOutModelType = Literal["classifier"] - - -class ClassifierFTModelOutTypedDict(TypedDict): - id: str - created: int - owned_by: str - workspace_id: str - root: str - root_version: str - archived: bool - capabilities: FTModelCapabilitiesOutTypedDict - job: str - classifier_targets: List[ClassifierTargetOutTypedDict] - object: NotRequired[ClassifierFTModelOutObject] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - model_type: NotRequired[ClassifierFTModelOutModelType] - - -class ClassifierFTModelOut(BaseModel): - id: str - - created: int - - owned_by: str - - workspace_id: str - - root: str - - root_version: str - - archived: bool - - capabilities: FTModelCapabilitiesOut - - job: str - - classifier_targets: List[ClassifierTargetOut] - - object: Optional[ClassifierFTModelOutObject] = "model" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - model_type: Optional[ClassifierFTModelOutModelType] = "classifier" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - "model_type", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, 
f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py deleted file mode 100644 index c8df6da3..00000000 --- a/src/mistralai/models/classifierjobout.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current status of the fine-tuning job.""" - -ClassifierJobOutObject = Literal["job"] -r"""The object type of the fine-tuning job.""" - -ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -ClassifierJobOutIntegrations = WandbIntegrationOut - - -ClassifierJobOutJobType = Literal["classifier"] -r"""The type of job (`FT` for fine-tuning).""" - - -class ClassifierJobOutTypedDict(TypedDict): - 
id: str - r"""The ID of the job.""" - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: ClassifierJobOutStatus - r"""The current status of the fine-tuning job.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - hyperparameters: ClassifierTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[ClassifierJobOutObject] - r"""The object type of the fine-tuning job.""" - fine_tuned_model: NotRequired[Nullable[str]] - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] - r"""A list of integrations enabled for your fine-tuning job.""" - trained_tokens: NotRequired[Nullable[int]] - r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierJobOutJobType] - r"""The type of job (`FT` for fine-tuning).""" - - -class ClassifierJobOut(BaseModel): - id: str - r"""The ID of the job.""" - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: ClassifierJobOutStatus - r"""The current status of the fine-tuning job.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - - hyperparameters: ClassifierTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data.""" - - object: Optional[ClassifierJobOutObject] = "job" - r"""The object type of the fine-tuning job.""" - - fine_tuned_model: OptionalNullable[str] = UNSET - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET - r"""A list of integrations enabled for your fine-tuning job.""" - - trained_tokens: OptionalNullable[int] = UNSET - r"""Total number of tokens trained.""" - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[ClassifierJobOutJobType] = "classifier" - r"""The type of job (`FT` for fine-tuning).""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertargetin.py b/src/mistralai/models/classifiertargetin.py deleted file mode 100644 index d8a060e4..00000000 --- a/src/mistralai/models/classifiertargetin.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ftclassifierlossfunction import FTClassifierLossFunction -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTargetInTypedDict(TypedDict): - name: str - labels: List[str] - weight: NotRequired[float] - loss_function: NotRequired[Nullable[FTClassifierLossFunction]] - - -class ClassifierTargetIn(BaseModel): - name: str - - labels: List[str] - - weight: Optional[float] = 1 - - loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["weight", "loss_function"] - nullable_fields = ["loss_function"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertargetout.py b/src/mistralai/models/classifiertargetout.py deleted file mode 100644 index ddc587f4..00000000 --- a/src/mistralai/models/classifiertargetout.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ftclassifierlossfunction import FTClassifierLossFunction -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ClassifierTargetOutTypedDict(TypedDict): - name: str - labels: List[str] - weight: float - loss_function: FTClassifierLossFunction - - -class ClassifierTargetOut(BaseModel): - name: str - - labels: List[str] - - weight: float - - loss_function: FTClassifierLossFunction diff --git a/src/mistralai/models/classifiertrainingparameters.py b/src/mistralai/models/classifiertrainingparameters.py deleted file mode 100644 index 718beeac..00000000 --- a/src/mistralai/models/classifiertrainingparameters.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersTypedDict(TypedDict): - training_steps: NotRequired[Nullable[int]] - learning_rate: NotRequired[float] - weight_decay: NotRequired[Nullable[float]] - warmup_fraction: NotRequired[Nullable[float]] - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParameters(BaseModel): - training_steps: OptionalNullable[int] = UNSET - - learning_rate: Optional[float] = 0.0001 - - weight_decay: OptionalNullable[float] = UNSET - - warmup_fraction: OptionalNullable[float] = UNSET - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = 
[ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertrainingparametersin.py b/src/mistralai/models/classifiertrainingparametersin.py deleted file mode 100644 index 9868843f..00000000 --- a/src/mistralai/models/classifiertrainingparametersin.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py deleted file mode 100644 index b0fc4d20..00000000 --- a/src/mistralai/models/codeinterpretertool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CodeInterpreterToolType = Literal["code_interpreter"] - - -class CodeInterpreterToolTypedDict(TypedDict): - type: NotRequired[CodeInterpreterToolType] - - -class CodeInterpreterTool(BaseModel): - type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py deleted file mode 100644 index 40aa0314..00000000 --- a/src/mistralai/models/completionargs.py +++ /dev/null @@ -1,101 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .toolchoiceenum import ToolChoiceEnum -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionArgsTypedDict(TypedDict): - r"""White-listed arguments from the completion API""" - - stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] - presence_penalty: NotRequired[Nullable[float]] - frequency_penalty: NotRequired[Nullable[float]] - temperature: NotRequired[Nullable[float]] - top_p: NotRequired[Nullable[float]] - max_tokens: NotRequired[Nullable[int]] - random_seed: NotRequired[Nullable[int]] - prediction: NotRequired[Nullable[PredictionTypedDict]] - response_format: NotRequired[Nullable[ResponseFormatTypedDict]] - tool_choice: NotRequired[ToolChoiceEnum] - - -class CompletionArgs(BaseModel): - r"""White-listed arguments from the completion API""" - - stop: 
OptionalNullable[CompletionArgsStop] = UNSET - - presence_penalty: OptionalNullable[float] = UNSET - - frequency_penalty: OptionalNullable[float] = UNSET - - temperature: OptionalNullable[float] = UNSET - - top_p: OptionalNullable[float] = UNSET - - max_tokens: OptionalNullable[int] = UNSET - - random_seed: OptionalNullable[int] = UNSET - - prediction: OptionalNullable[Prediction] = UNSET - - response_format: OptionalNullable[ResponseFormat] = UNSET - - tool_choice: Optional[ToolChoiceEnum] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - "tool_choice", - ] - nullable_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py deleted file mode 100644 index 4d1fcfbf..00000000 --- a/src/mistralai/models/completionchunk.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionChunkTypedDict(TypedDict): - id: str - model: str - choices: List[CompletionResponseStreamChoiceTypedDict] - object: NotRequired[str] - created: NotRequired[int] - usage: NotRequired[UsageInfoTypedDict] - - -class CompletionChunk(BaseModel): - id: str - - model: str - - choices: List[CompletionResponseStreamChoice] - - object: Optional[str] = None - - created: Optional[int] = None - - usage: Optional[UsageInfo] = None diff --git a/src/mistralai/models/completiondetailedjobout.py b/src/mistralai/models/completiondetailedjobout.py deleted file mode 100644 index 8fb1b62a..00000000 --- a/src/mistralai/models/completiondetailedjobout.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] - -CompletionDetailedJobOutObject = Literal["job"] - -CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -CompletionDetailedJobOutIntegrations = WandbIntegrationOut - - -CompletionDetailedJobOutJobType = Literal["completion"] - -CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepositories = GithubRepositoryOut - - -class CompletionDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: CompletionDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[CompletionDetailedJobOutObject] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] - ] - trained_tokens: 
NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[CompletionDetailedJobOutJobType] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class CompletionDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: CompletionDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - - object: Optional[CompletionDetailedJobOutObject] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[CompletionDetailedJobOutJobType] = "completion" - - repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "repositories", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py deleted file mode 100644 index cc859910..00000000 --- a/src/mistralai/models/completionevent.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class CompletionEventTypedDict(TypedDict): - data: CompletionChunkTypedDict - - -class CompletionEvent(BaseModel): - data: CompletionChunk diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py deleted file mode 100644 index ab71168b..00000000 --- a/src/mistralai/models/completionftmodelout.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionFTModelOutObject = Literal["model"] - -ModelType = Literal["completion"] - - -class CompletionFTModelOutTypedDict(TypedDict): - id: str - created: int - owned_by: str - workspace_id: str - root: str - root_version: str - archived: bool - capabilities: FTModelCapabilitiesOutTypedDict - job: str - object: NotRequired[CompletionFTModelOutObject] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - model_type: NotRequired[ModelType] - - -class CompletionFTModelOut(BaseModel): - id: str - - created: int - - owned_by: str - - workspace_id: str - - root: str - - root_version: str - - archived: bool - - capabilities: FTModelCapabilitiesOut - - job: str - - object: Optional[CompletionFTModelOutObject] = "model" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - 
max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - model_type: Optional[ModelType] = "completion" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - "model_type", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py deleted file mode 100644 index bed67b50..00000000 --- a/src/mistralai/models/completionjobout.py +++ /dev/null @@ -1,175 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Status = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current status of the fine-tuning job.""" - -CompletionJobOutObject = Literal["job"] -r"""The object type of the fine-tuning job.""" - -IntegrationsTypedDict = WandbIntegrationOutTypedDict - - -Integrations = WandbIntegrationOut - - -JobType = Literal["completion"] -r"""The type of job (`FT` for fine-tuning).""" - -RepositoriesTypedDict = GithubRepositoryOutTypedDict - - -Repositories = GithubRepositoryOut - - -class CompletionJobOutTypedDict(TypedDict): - id: str - r"""The ID of the job.""" - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: Status - r"""The current status of the fine-tuning job.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data.""" - object: 
NotRequired[CompletionJobOutObject] - r"""The object type of the fine-tuning job.""" - fine_tuned_model: NotRequired[Nullable[str]] - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] - r"""A list of integrations enabled for your fine-tuning job.""" - trained_tokens: NotRequired[Nullable[int]] - r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[JobType] - r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[RepositoriesTypedDict]] - - -class CompletionJobOut(BaseModel): - id: str - r"""The ID of the job.""" - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: Status - r"""The current status of the fine-tuning job.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data.""" - - object: Optional[CompletionJobOutObject] = "job" - r"""The object type of the fine-tuning job.""" - - fine_tuned_model: OptionalNullable[str] = UNSET - r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - integrations: OptionalNullable[List[Integrations]] = UNSET - r"""A list of integrations enabled for your fine-tuning job.""" - - trained_tokens: OptionalNullable[int] = UNSET - r"""Total number of tokens trained.""" - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[JobType] = "completion" - r"""The type of job (`FT` for fine-tuning).""" - - repositories: Optional[List[Repositories]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "repositories", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py deleted file mode 100644 index 2426148c..00000000 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ /dev/null @@ -1,62 +0,0 
@@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from mistralai.utils import validate_open_enum -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing import Literal, Union -from typing_extensions import Annotated, TypedDict - - -CompletionResponseStreamChoiceFinishReason = Union[ - Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr -] - - -class CompletionResponseStreamChoiceTypedDict(TypedDict): - index: int - delta: DeltaMessageTypedDict - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - - -class CompletionResponseStreamChoice(BaseModel): - index: int - - delta: DeltaMessage - - finish_reason: Annotated[ - Nullable[CompletionResponseStreamChoiceFinishReason], - PlainValidator(validate_open_enum(False)), - ] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completiontrainingparameters.py b/src/mistralai/models/completiontrainingparameters.py deleted file mode 100644 index 0200e81c..00000000 --- 
a/src/mistralai/models/completiontrainingparameters.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersTypedDict(TypedDict): - training_steps: NotRequired[Nullable[int]] - learning_rate: NotRequired[float] - weight_decay: NotRequired[Nullable[float]] - warmup_fraction: NotRequired[Nullable[float]] - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParameters(BaseModel): - training_steps: OptionalNullable[int] = UNSET - - learning_rate: Optional[float] = 0.0001 - - weight_decay: OptionalNullable[float] = UNSET - - warmup_fraction: OptionalNullable[float] = UNSET - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != 
UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completiontrainingparametersin.py b/src/mistralai/models/completiontrainingparametersin.py deleted file mode 100644 index 1f74bb9d..00000000 --- a/src/mistralai/models/completiontrainingparametersin.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. 
During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py deleted file mode 100644 index 47170eef..00000000 --- a/src/mistralai/models/contentchunk.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .audiochunk import AudioChunk, AudioChunkTypedDict -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .filechunk import FileChunk, FileChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - ReferenceChunkTypedDict, - FileChunkTypedDict, - AudioChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ], -) - - -ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[DocumentURLChunk, Tag("document_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - Annotated[FileChunk, Tag("file")], - Annotated[ThinkChunk, Tag("thinking")], - Annotated[AudioChunk, Tag("input_audio")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py deleted file mode 100644 index ecc47e45..00000000 --- a/src/mistralai/models/conversationappendrequest.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationAppendRequestHandoffExecution = Literal["client", "server"] - - -class ConversationAppendRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationAppendRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = False - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py deleted file mode 100644 index 25ffe5fb..00000000 --- a/src/mistralai/models/conversationappendstreamrequest.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationAppendStreamRequestHandoffExecution = Literal["client", "server"] - - -class ConversationAppendStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationAppendStreamRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = True - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( - "server" - ) - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationevents.py b/src/mistralai/models/conversationevents.py deleted file mode 100644 index ba4c628c..00000000 --- a/src/mistralai/models/conversationevents.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict -from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - AgentHandoffStartedEventTypedDict, -) -from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict -from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict -from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict -from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict -from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict -from .ssetypes import SSETypes -from .toolexecutiondeltaevent import ( - ToolExecutionDeltaEvent, - ToolExecutionDeltaEventTypedDict, -) -from .toolexecutiondoneevent import ( - ToolExecutionDoneEvent, - ToolExecutionDoneEventTypedDict, -) -from .toolexecutionstartedevent import ( - ToolExecutionStartedEvent, - ToolExecutionStartedEventTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -ConversationEventsDataTypedDict = TypeAliasType( - "ConversationEventsDataTypedDict", - Union[ - ResponseStartedEventTypedDict, - ResponseDoneEventTypedDict, - ResponseErrorEventTypedDict, - ToolExecutionStartedEventTypedDict, - ToolExecutionDeltaEventTypedDict, - ToolExecutionDoneEventTypedDict, - AgentHandoffStartedEventTypedDict, - AgentHandoffDoneEventTypedDict, - FunctionCallEventTypedDict, - MessageOutputEventTypedDict, - ], -) - - -ConversationEventsData = Annotated[ - Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, Tag("conversation.response.error")], - 
Annotated[ResponseStartedEvent, Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ConversationEventsTypedDict(TypedDict): - event: SSETypes - r"""Server side events sent when streaming a conversation response.""" - data: ConversationEventsDataTypedDict - - -class ConversationEvents(BaseModel): - event: SSETypes - r"""Server side events sent when streaming a conversation response.""" - - data: ConversationEventsData diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py deleted file mode 100644 index 472915fe..00000000 --- a/src/mistralai/models/conversationhistory.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationHistoryObject = Literal["conversation.history"] - -EntriesTypedDict = TypeAliasType( - "EntriesTypedDict", - Union[ - FunctionResultEntryTypedDict, - MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -Entries = TypeAliasType( - "Entries", - Union[ - FunctionResultEntry, - MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, - MessageOutputEntry, - AgentHandoffEntry, - ], -) - - -class ConversationHistoryTypedDict(TypedDict): - r"""Retrieve all entries in a conversation.""" - - conversation_id: str - entries: List[EntriesTypedDict] - object: NotRequired[ConversationHistoryObject] - - -class ConversationHistory(BaseModel): - r"""Retrieve all entries in a conversation.""" - - conversation_id: str - - entries: List[Entries] - - object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py deleted file mode 100644 index 9027045b..00000000 --- a/src/mistralai/models/conversationmessages.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .messageentries import MessageEntries, MessageEntriesTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationMessagesObject = Literal["conversation.messages"] - - -class ConversationMessagesTypedDict(TypedDict): - r"""Similar to the conversation history but only keep the messages""" - - conversation_id: str - messages: List[MessageEntriesTypedDict] - object: NotRequired[ConversationMessagesObject] - - -class ConversationMessages(BaseModel): - r"""Similar to the conversation history but only keep the messages""" - - conversation_id: str - - messages: List[MessageEntries] - - object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py deleted file mode 100644 index 0fcca512..00000000 --- a/src/mistralai/models/conversationrequest.py +++ /dev/null @@ -1,133 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -HandoffExecution = Literal["client", "server"] - -ToolsTypedDict = TypeAliasType( - "ToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -Tools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ConversationRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[HandoffExecution]] - instructions: NotRequired[Nullable[str]] - 
tools: NotRequired[Nullable[List[ToolsTypedDict]]] - completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - model: NotRequired[Nullable[str]] - - -class ConversationRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = False - - store: OptionalNullable[bool] = UNSET - - handoff_execution: OptionalNullable[HandoffExecution] = UNSET - - instructions: OptionalNullable[str] = UNSET - - tools: OptionalNullable[List[Tools]] = UNSET - - completion_args: OptionalNullable[CompletionArgs] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "agent_id", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "agent_id", - "model", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py deleted file mode 100644 index 61de8565..00000000 --- 
a/src/mistralai/models/conversationresponse.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationResponseObject = Literal["conversation.response"] - -OutputsTypedDict = TypeAliasType( - "OutputsTypedDict", - Union[ - ToolExecutionEntryTypedDict, - FunctionCallEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -Outputs = TypeAliasType( - "Outputs", - Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], -) - - -class ConversationResponseTypedDict(TypedDict): - r"""The response after appending new entries to the conversation.""" - - conversation_id: str - outputs: List[OutputsTypedDict] - usage: ConversationUsageInfoTypedDict - object: NotRequired[ConversationResponseObject] - - -class ConversationResponse(BaseModel): - r"""The response after appending new entries to the conversation.""" - - conversation_id: str - - outputs: List[Outputs] - - usage: ConversationUsageInfo - - object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py deleted file mode 100644 index 58376140..00000000 --- a/src/mistralai/models/conversationrestartrequest.py +++ /dev/null @@ -1,42 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationRestartRequestHandoffExecution = Literal["client", "server"] - - -class ConversationRestartRequestTypedDict(TypedDict): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputsTypedDict - from_entry_id: str - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationRestartRequest(BaseModel): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputs - - from_entry_id: str - - stream: Optional[bool] = False - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py deleted file mode 100644 index f213aea3..00000000 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationRestartStreamRequestHandoffExecution = Literal["client", "server"] - - -class ConversationRestartStreamRequestTypedDict(TypedDict): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputsTypedDict - from_entry_id: str - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationRestartStreamRequest(BaseModel): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputs - - from_entry_id: str - - stream: Optional[bool] = True - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( - "server" - ) - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py deleted file mode 100644 index 0880727e..00000000 --- a/src/mistralai/models/conversationstreamrequest.py +++ /dev/null @@ -1,135 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ConversationStreamRequestHandoffExecution = Literal["client", "server"] - -ConversationStreamRequestToolsTypedDict = TypeAliasType( - "ConversationStreamRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -ConversationStreamRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ConversationStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[Nullable[bool]] 
- handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] - instructions: NotRequired[Nullable[str]] - tools: NotRequired[Nullable[List[ConversationStreamRequestToolsTypedDict]]] - completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - model: NotRequired[Nullable[str]] - - -class ConversationStreamRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = True - - store: OptionalNullable[bool] = UNSET - - handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( - UNSET - ) - - instructions: OptionalNullable[str] = UNSET - - tools: OptionalNullable[List[ConversationStreamRequestTools]] = UNSET - - completion_args: OptionalNullable[CompletionArgs] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "agent_id", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "agent_id", - "model", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = 
val - - return m diff --git a/src/mistralai/models/conversationusageinfo.py b/src/mistralai/models/conversationusageinfo.py deleted file mode 100644 index 9ae6f4fb..00000000 --- a/src/mistralai/models/conversationusageinfo.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class ConversationUsageInfoTypedDict(TypedDict): - prompt_tokens: NotRequired[int] - completion_tokens: NotRequired[int] - total_tokens: NotRequired[int] - connector_tokens: NotRequired[Nullable[int]] - connectors: NotRequired[Nullable[Dict[str, int]]] - - -class ConversationUsageInfo(BaseModel): - prompt_tokens: Optional[int] = 0 - - completion_tokens: Optional[int] = 0 - - total_tokens: Optional[int] = 0 - - connector_tokens: OptionalNullable[int] = UNSET - - connectors: OptionalNullable[Dict[str, int]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "connector_tokens", - "connectors", - ] - nullable_fields = ["connector_tokens", "connectors"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git 
a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py deleted file mode 100644 index 2b346ec4..00000000 --- a/src/mistralai/models/deletefileout.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class DeleteFileOutTypedDict(TypedDict): - id: str - r"""The ID of the deleted file.""" - object: str - r"""The object type that was deleted""" - deleted: bool - r"""The deletion status.""" - - -class DeleteFileOut(BaseModel): - id: str - r"""The ID of the deleted file.""" - - object: str - r"""The object type that was deleted""" - - deleted: bool - r"""The deletion status.""" diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py deleted file mode 100644 index c1b1effc..00000000 --- a/src/mistralai/models/deletemodelout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class DeleteModelOutTypedDict(TypedDict): - id: str - r"""The ID of the deleted model.""" - object: NotRequired[str] - r"""The object type that was deleted""" - deleted: NotRequired[bool] - r"""The deletion status""" - - -class DeleteModelOut(BaseModel): - id: str - r"""The ID of the deleted model.""" - - object: Optional[str] = "model" - r"""The object type that was deleted""" - - deleted: Optional[bool] = True - r"""The deletion status""" diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py deleted file mode 100644 index 88aefe7f..00000000 --- a/src/mistralai/models/deltamessage.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) - - -class DeltaMessageTypedDict(TypedDict): - role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - - -class DeltaMessage(BaseModel): - role: OptionalNullable[str] = UNSET - - content: OptionalNullable[Content] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role", "content", 
"tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documentlibrarytool.py b/src/mistralai/models/documentlibrarytool.py deleted file mode 100644 index f36de710..00000000 --- a/src/mistralai/models/documentlibrarytool.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentLibraryToolType = Literal["document_library"] - - -class DocumentLibraryToolTypedDict(TypedDict): - library_ids: List[str] - r"""Ids of the library in which to search.""" - type: NotRequired[DocumentLibraryToolType] - - -class DocumentLibraryTool(BaseModel): - library_ids: List[str] - r"""Ids of the library in which to search.""" - - type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/models/documentout.py b/src/mistralai/models/documentout.py deleted file mode 100644 index 65f1be80..00000000 --- a/src/mistralai/models/documentout.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class DocumentOutTypedDict(TypedDict): - id: str - library_id: str - hash: str - mime_type: str - extension: str - size: int - name: str - created_at: datetime - processing_status: str - uploaded_by_id: str - uploaded_by_type: str - tokens_processing_total: int - summary: NotRequired[Nullable[str]] - last_processed_at: NotRequired[Nullable[datetime]] - number_of_pages: NotRequired[Nullable[int]] - tokens_processing_main_content: NotRequired[Nullable[int]] - tokens_processing_summary: NotRequired[Nullable[int]] - - -class DocumentOut(BaseModel): - id: str - - library_id: str - - hash: str - - mime_type: str - - extension: str - - size: int - - name: str - - created_at: datetime - - processing_status: str - - uploaded_by_id: str - - uploaded_by_type: str - - tokens_processing_total: int - - summary: OptionalNullable[str] = UNSET - - last_processed_at: OptionalNullable[datetime] = UNSET - - number_of_pages: OptionalNullable[int] = UNSET - - tokens_processing_main_content: OptionalNullable[int] = UNSET - - tokens_processing_summary: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "summary", - "last_processed_at", - "number_of_pages", - "tokens_processing_main_content", - "tokens_processing_summary", - ] - nullable_fields = [ - "summary", - "last_processed_at", - "number_of_pages", - "tokens_processing_main_content", - "tokens_processing_summary", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documentupdatein.py b/src/mistralai/models/documentupdatein.py deleted file mode 100644 index 0f6abd5b..00000000 --- a/src/mistralai/models/documentupdatein.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class DocumentUpdateInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - - -class DocumentUpdateIn(BaseModel): - name: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name"] - nullable_fields = ["name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py deleted file mode 100644 index 33f29ba8..00000000 --- a/src/mistralai/models/documenturlchunk.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url"] - - -class DocumentURLChunkTypedDict(TypedDict): - document_url: str - document_name: NotRequired[Nullable[str]] - r"""The filename of the document""" - type: NotRequired[DocumentURLChunkType] - - -class DocumentURLChunk(BaseModel): - document_url: str - - document_name: OptionalNullable[str] = UNSET - r"""The filename of the document""" - - type: Optional[DocumentURLChunkType] = "document_url" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_name", "type"] - nullable_fields = ["document_name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/embeddingdtype.py b/src/mistralai/models/embeddingdtype.py deleted file mode 100644 index 4f3c41bd..00000000 --- a/src/mistralai/models/embeddingdtype.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -EmbeddingDtype = Literal["float", "int8", "uint8", "binary", "ubinary"] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py deleted file mode 100644 index 4b5db550..00000000 --- a/src/mistralai/models/embeddingrequest.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .embeddingdtype import EmbeddingDtype -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -EmbeddingRequestInputsTypedDict = TypeAliasType( - "EmbeddingRequestInputsTypedDict", Union[str, List[str]] -) -r"""Text to embed.""" - - -EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""Text to embed.""" - - -class EmbeddingRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use.""" - inputs: EmbeddingRequestInputsTypedDict - r"""Text to embed.""" - output_dimension: NotRequired[Nullable[int]] - r"""The dimension of the output embeddings.""" - output_dtype: NotRequired[EmbeddingDtype] - - -class EmbeddingRequest(BaseModel): - model: str - r"""ID of the model to use.""" - - inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""Text to embed.""" - - output_dimension: OptionalNullable[int] = UNSET - r"""The dimension of the output embeddings.""" - - output_dtype: Optional[EmbeddingDtype] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["output_dimension", "output_dtype"] - nullable_fields = ["output_dimension"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in 
type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py deleted file mode 100644 index 01e2765f..00000000 --- a/src/mistralai/models/embeddingresponsedata.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class EmbeddingResponseDataTypedDict(TypedDict): - object: NotRequired[str] - embedding: NotRequired[List[float]] - index: NotRequired[int] - - -class EmbeddingResponseData(BaseModel): - object: Optional[str] = None - - embedding: Optional[List[float]] = None - - index: Optional[int] = None diff --git a/src/mistralai/models/entitytype.py b/src/mistralai/models/entitytype.py deleted file mode 100644 index b5149c5f..00000000 --- a/src/mistralai/models/entitytype.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -EntityType = Union[Literal["User", "Workspace", "Org"], UnrecognizedStr] -r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py deleted file mode 100644 index 32819034..00000000 --- a/src/mistralai/models/eventout.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class EventOutTypedDict(TypedDict): - name: str - r"""The name of the event.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - data: NotRequired[Nullable[Dict[str, Any]]] - - -class EventOut(BaseModel): - name: str - r"""The name of the event.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - data: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["data"] - nullable_fields = ["data"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/file.py 
b/src/mistralai/models/file.py deleted file mode 100644 index 682d7f6e..00000000 --- a/src/mistralai/models/file.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -import io -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata -import pydantic -from typing import IO, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FileTypedDict(TypedDict): - file_name: str - content: Union[bytes, IO[bytes], io.BufferedReader] - content_type: NotRequired[str] - - -class File(BaseModel): - file_name: Annotated[ - str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) - ] - - content: Annotated[ - Union[bytes, IO[bytes], io.BufferedReader], - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(content=True)), - ] - - content_type: Annotated[ - Optional[str], - pydantic.Field(alias="Content-Type"), - FieldMetadata(multipart=True), - ] = None diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py deleted file mode 100644 index 83e60cef..00000000 --- a/src/mistralai/models/filechunk.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class FileChunkTypedDict(TypedDict): - file_id: str - type: Literal["file"] - - -class FileChunk(BaseModel): - file_id: str - - TYPE: Annotated[ - Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], - pydantic.Field(alias="type"), - ] = "file" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py deleted file mode 100644 index 8599192b..00000000 --- a/src/mistralai/models/filepurpose.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py deleted file mode 100644 index a84a7a8e..00000000 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesDeleteFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_download_fileop.py b/src/mistralai/models/files_api_routes_download_fileop.py deleted file mode 100644 index 168a7fa6..00000000 --- a/src/mistralai/models/files_api_routes_download_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesDownloadFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_get_signed_urlop.py b/src/mistralai/models/files_api_routes_get_signed_urlop.py deleted file mode 100644 index 708d40ab..00000000 --- a/src/mistralai/models/files_api_routes_get_signed_urlop.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): - file_id: str - expiry: NotRequired[int] - r"""Number of hours before the url becomes invalid. Defaults to 24h""" - - -class FilesAPIRoutesGetSignedURLRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - expiry: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 24 - r"""Number of hours before the url becomes invalid. Defaults to 24h""" diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py deleted file mode 100644 index 5060c3b8..00000000 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata, validate_open_enum -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - sample_type: NotRequired[Nullable[List[SampleType]]] - source: NotRequired[Nullable[List[Source]]] - search: NotRequired[Nullable[str]] - purpose: NotRequired[Nullable[FilePurpose]] - - -class FilesAPIRoutesListFilesRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - sample_type: Annotated[ - OptionalNullable[ - List[Annotated[SampleType, PlainValidator(validate_open_enum(False))]] - ], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - source: Annotated[ - OptionalNullable[ - List[Annotated[Source, PlainValidator(validate_open_enum(False))]] - ], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - search: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - purpose: Annotated[ - Annotated[ - OptionalNullable[FilePurpose], PlainValidator(validate_open_enum(False)) - ], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "sample_type", 
- "source", - "search", - "purpose", - ] - nullable_fields = ["sample_type", "source", "search", "purpose"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py deleted file mode 100644 index 0c2a95ef..00000000 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesRetrieveFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py deleted file mode 100644 index 34321cf5..00000000 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .filepurpose import FilePurpose -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): - file: FileTypedDict - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - purpose: NotRequired[FilePurpose] - - -class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): - file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - purpose: Annotated[ - Annotated[Optional[FilePurpose], PlainValidator(validate_open_enum(False))], - FieldMetadata(multipart=True), - ] = None diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py deleted file mode 100644 index 7c7b60c6..00000000 --- a/src/mistralai/models/fileschema.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FileSchemaTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class FileSchema(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - - sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - - source: Annotated[Source, PlainValidator(validate_open_enum(False))] - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - 
serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/filesignedurl.py b/src/mistralai/models/filesignedurl.py deleted file mode 100644 index 092be7f8..00000000 --- a/src/mistralai/models/filesignedurl.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class FileSignedURLTypedDict(TypedDict): - url: str - - -class FileSignedURL(BaseModel): - url: str diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py deleted file mode 100644 index 06210139..00000000 --- a/src/mistralai/models/fimcompletionrequest.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionRequestStopTypedDict = TypeAliasType( - "FIMCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -FIMCompletionRequestStop = TypeAliasType( - "FIMCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[FIMCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionRequest(BaseModel): - model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[FIMCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py deleted file mode 100644 index f27972b9..00000000 --- a/src/mistralai/models/fimcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class FIMCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: int - choices: List[ChatCompletionChoiceTypedDict] - - -class FIMCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: int - - choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py deleted file mode 100644 index 05cc345b..00000000 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionStreamRequestStopTypedDict = TypeAliasType( - "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionStreamRequestStop = TypeAliasType( - "FIMCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[FIMCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py deleted file mode 100644 index 3507dc91..00000000 --- a/src/mistralai/models/finetuneablemodeltype.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FineTuneableModelType = Literal["completion", "classifier"] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py deleted file mode 100644 index df2d19ff..00000000 --- a/src/mistralai/models/ftclassifierlossfunction.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FTClassifierLossFunction = Literal["single_class", "multi_class"] diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py deleted file mode 100644 index 7f3aa18b..00000000 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class FTModelCapabilitiesOutTypedDict(TypedDict): - completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] - function_calling: NotRequired[bool] - fine_tuning: NotRequired[bool] - classification: NotRequired[bool] - - -class FTModelCapabilitiesOut(BaseModel): - completion_chat: Optional[bool] = True - - completion_fim: Optional[bool] = False - - function_calling: Optional[bool] = False - - fine_tuning: Optional[bool] = False - - classification: Optional[bool] = False diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py deleted file mode 100644 index 7159ce00..00000000 --- a/src/mistralai/models/ftmodelcard.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -FTModelCardType = Literal["fine-tuned"] - - -class FTModelCardTypedDict(TypedDict): - r"""Extra fields for fine-tuned models.""" - - id: str - capabilities: ModelCapabilitiesTypedDict - job: str - root: str - object: NotRequired[str] - created: NotRequired[int] - owned_by: NotRequired[str] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - deprecation: NotRequired[Nullable[datetime]] - deprecation_replacement_model: NotRequired[Nullable[str]] - default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType - archived: NotRequired[bool] - - -class FTModelCard(BaseModel): - r"""Extra fields for fine-tuned models.""" - - id: str - - capabilities: ModelCapabilities - - job: str - - root: str - - object: Optional[str] = "model" - - created: Optional[int] = None - - owned_by: Optional[str] = "mistralai" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - deprecation: OptionalNullable[datetime] = UNSET - - deprecation_replacement_model: OptionalNullable[str] = UNSET - - default_model_temperature: OptionalNullable[float] = UNSET - - TYPE: Annotated[ - Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) - ], - pydantic.Field(alias="type"), - ] = "fine-tuned" - - archived: Optional[bool] 
= False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "type", - "archived", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py deleted file mode 100644 index 7d40cf75..00000000 --- a/src/mistralai/models/function.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class FunctionTypedDict(TypedDict): - name: str - parameters: Dict[str, Any] - description: NotRequired[str] - strict: NotRequired[bool] - - -class Function(BaseModel): - name: str - - parameters: Dict[str, Any] - - description: Optional[str] = None - - strict: Optional[bool] = None diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py deleted file mode 100644 index 0cce622a..00000000 --- a/src/mistralai/models/functioncall.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) - - -Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) - - -class FunctionCallTypedDict(TypedDict): - name: str - arguments: ArgumentsTypedDict - - -class FunctionCall(BaseModel): - name: str - - arguments: Arguments diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py deleted file mode 100644 index 1e47fda9..00000000 --- a/src/mistralai/models/functioncallentry.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functioncallentryarguments import ( - FunctionCallEntryArguments, - FunctionCallEntryArgumentsTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEntryObject = Literal["entry"] - -FunctionCallEntryType = Literal["function.call"] - - -class FunctionCallEntryTypedDict(TypedDict): - tool_call_id: str - name: str - arguments: FunctionCallEntryArgumentsTypedDict - object: NotRequired[FunctionCallEntryObject] - type: NotRequired[FunctionCallEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class FunctionCallEntry(BaseModel): - tool_call_id: str - - name: str - - arguments: FunctionCallEntryArguments - - object: Optional[FunctionCallEntryObject] = "entry" - - type: Optional[FunctionCallEntryType] = "function.call" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git 
a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py deleted file mode 100644 index 90b4b226..00000000 --- a/src/mistralai/models/functioncallevent.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEventType = Literal["function.call.delta"] - - -class FunctionCallEventTypedDict(TypedDict): - id: str - name: str - tool_call_id: str - arguments: str - type: NotRequired[FunctionCallEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class FunctionCallEvent(BaseModel): - id: str - - name: str - - tool_call_id: str - - arguments: str - - type: Optional[FunctionCallEventType] = "function.call.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py deleted file mode 100644 index 0a6c0b14..00000000 --- a/src/mistralai/models/functionname.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class FunctionNameTypedDict(TypedDict): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str - - -class FunctionName(BaseModel): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py deleted file mode 100644 index f09e11ae..00000000 --- a/src/mistralai/models/functionresultentry.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionResultEntryObject = Literal["entry"] - -FunctionResultEntryType = Literal["function.result"] - - -class FunctionResultEntryTypedDict(TypedDict): - tool_call_id: str - result: str - object: NotRequired[FunctionResultEntryObject] - type: NotRequired[FunctionResultEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class FunctionResultEntry(BaseModel): - tool_call_id: str - - result: str - - object: Optional[FunctionResultEntryObject] = "entry" - - type: Optional[FunctionResultEntryType] = "function.result" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized 
= handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py deleted file mode 100644 index 7ce5c464..00000000 --- a/src/mistralai/models/functiontool.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionToolType = Literal["function"] - - -class FunctionToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[FunctionToolType] - - -class FunctionTool(BaseModel): - function: Function - - type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py deleted file mode 100644 index 801c0540..00000000 --- a/src/mistralai/models/githubrepositoryin.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryInType = Literal["github"] - - -class GithubRepositoryInTypedDict(TypedDict): - name: str - owner: str - token: str - type: NotRequired[GithubRepositoryInType] - ref: NotRequired[Nullable[str]] - weight: NotRequired[float] - - -class GithubRepositoryIn(BaseModel): - name: str - - owner: str - - token: str - - type: Optional[GithubRepositoryInType] = "github" - - ref: OptionalNullable[str] = UNSET - - weight: Optional[float] = 1 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py deleted file mode 100644 index 0d74c17a..00000000 --- a/src/mistralai/models/githubrepositoryout.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryOutType = Literal["github"] - - -class GithubRepositoryOutTypedDict(TypedDict): - name: str - owner: str - commit_id: str - type: NotRequired[GithubRepositoryOutType] - ref: NotRequired[Nullable[str]] - weight: NotRequired[float] - - -class GithubRepositoryOut(BaseModel): - name: str - - owner: str - - commit_id: str - - type: Optional[GithubRepositoryOutType] = "github" - - ref: OptionalNullable[str] = UNSET - - weight: Optional[float] = 1 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py deleted file mode 100644 index 37f2dd76..00000000 --- a/src/mistralai/models/httpvalidationerror.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .validationerror import ValidationError -from mistralai import utils -from mistralai.types import BaseModel -from typing import List, Optional - - -class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None - - -class HTTPValidationError(Exception): - data: HTTPValidationErrorData - - def __init__(self, data: HTTPValidationErrorData): - self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py deleted file mode 100644 index 27bb2d12..00000000 --- a/src/mistralai/models/imagegenerationtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ImageGenerationToolType = Literal["image_generation"] - - -class ImageGenerationToolTypedDict(TypedDict): - type: NotRequired[ImageGenerationToolType] - - -class ImageGenerationTool(BaseModel): - type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py deleted file mode 100644 index 6f077b69..00000000 --- a/src/mistralai/models/imageurl.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class ImageURLTypedDict(TypedDict): - url: str - detail: NotRequired[Nullable[str]] - - -class ImageURL(BaseModel): - url: str - - detail: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py deleted file mode 100644 index 498690f5..00000000 --- a/src/mistralai/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url"] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/models/inputs.py b/src/mistralai/models/inputs.py deleted file mode 100644 index 34d20f34..00000000 --- a/src/mistralai/models/inputs.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .instructrequest import InstructRequest, InstructRequestTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InstructRequestInputsMessagesTypedDict = TypeAliasType( - "InstructRequestInputsMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InstructRequestInputsMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestInputsTypedDict(TypedDict): - messages: List[InstructRequestInputsMessagesTypedDict] - - -class InstructRequestInputs(BaseModel): - messages: List[InstructRequestInputsMessages] - - -InputsTypedDict = TypeAliasType( - "InputsTypedDict", - Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], -) -r"""Chat to classify""" - - -Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) -r"""Chat to classify""" diff --git a/src/mistralai/models/instructrequest.py b/src/mistralai/models/instructrequest.py deleted file mode 100644 index dddbda00..00000000 --- a/src/mistralai/models/instructrequest.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InstructRequestMessagesTypedDict = TypeAliasType( - "InstructRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InstructRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestTypedDict(TypedDict): - messages: List[InstructRequestMessagesTypedDict] - - -class InstructRequest(BaseModel): - messages: List[InstructRequestMessages] diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py deleted file mode 100644 index aa0cd06c..00000000 --- a/src/mistralai/models/jobin.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict -from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, -) -from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, -) -from .finetuneablemodeltype import FineTuneableModelType -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict -from .trainingfile import TrainingFile, TrainingFileTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -JobInIntegrationsTypedDict = WandbIntegrationTypedDict - - -JobInIntegrations = WandbIntegration - - -HyperparametersTypedDict = TypeAliasType( - "HyperparametersTypedDict", - Union[ - ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict - ], -) - - -Hyperparameters = TypeAliasType( - "Hyperparameters", - Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], -) - - -JobInRepositoriesTypedDict = GithubRepositoryInTypedDict - - -JobInRepositories = GithubRepositoryIn - - -class JobInTypedDict(TypedDict): - model: str - r"""The name of the model to fine-tune.""" - hyperparameters: HyperparametersTypedDict - training_files: NotRequired[List[TrainingFileTypedDict]] - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files.""" - suffix: NotRequired[Nullable[str]] - r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] - r"""A list of integrations to enable for your fine-tuning job.""" - auto_start: NotRequired[bool] - r"""This field will be required in a future release.""" - invalid_sample_skip_percentage: NotRequired[float] - job_type: NotRequired[Nullable[FineTuneableModelType]] - repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] - classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] - - -class JobIn(BaseModel): - model: str - r"""The name of the model to fine-tune.""" - - hyperparameters: Hyperparameters - - training_files: Optional[List[TrainingFile]] = None - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" - - suffix: OptionalNullable[str] = UNSET - r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - - integrations: OptionalNullable[List[JobInIntegrations]] = UNSET - r"""A list of integrations to enable for your fine-tuning job.""" - - auto_start: Optional[bool] = None - r"""This field will be required in a future release.""" - - invalid_sample_skip_percentage: Optional[float] = 0 - - job_type: OptionalNullable[FineTuneableModelType] = UNSET - - repositories: OptionalNullable[List[JobInRepositories]] = UNSET - - classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_files", - "validation_files", - "suffix", - "integrations", - "auto_start", - "invalid_sample_skip_percentage", - "job_type", - "repositories", - "classifier_targets", - ] - nullable_fields = [ - "validation_files", - "suffix", - "integrations", - "job_type", - "repositories", - "classifier_targets", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py deleted file mode 100644 index 10ef781e..00000000 --- a/src/mistralai/models/jobmetadataout.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class JobMetadataOutTypedDict(TypedDict): - expected_duration_seconds: NotRequired[Nullable[int]] - cost: NotRequired[Nullable[float]] - cost_currency: NotRequired[Nullable[str]] - train_tokens_per_step: NotRequired[Nullable[int]] - train_tokens: NotRequired[Nullable[int]] - data_tokens: NotRequired[Nullable[int]] - estimated_start_time: NotRequired[Nullable[int]] - - -class JobMetadataOut(BaseModel): - expected_duration_seconds: OptionalNullable[int] = UNSET - - cost: OptionalNullable[float] = UNSET - - cost_currency: OptionalNullable[str] = UNSET - - train_tokens_per_step: OptionalNullable[int] = UNSET - - train_tokens: OptionalNullable[int] = UNSET - - data_tokens: OptionalNullable[int] = UNSET - - estimated_start_time: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m 
diff --git a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py deleted file mode 100644 index 5b83d534..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py deleted file mode 100644 index d9c7b398..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py deleted file mode 100644 index c48246d5..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .batchjobstatus import BatchJobStatus -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Any, Dict, List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - created_after: NotRequired[Nullable[datetime]] - created_by_me: NotRequired[bool] - status: NotRequired[Nullable[List[BatchJobStatus]]] - - -class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - model: Annotated[ - 
OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - agent_id: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - metadata: Annotated[ - OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_after: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_by_me: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = False - - status: Annotated[ - OptionalNullable[List[BatchJobStatus]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "agent_id", - "metadata", - "created_after", - "created_by_me", - "status", - ] - nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py deleted file mode 100644 index ceb19a69..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ /dev/null @@ -1,45 
+0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to cancel.""" - - -class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to cancel.""" - - -JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py deleted file mode 100644 index 39af3ea6..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -Response1TypedDict = TypeAliasType( - "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -Response1 = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] - - -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response1], -) -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py deleted file mode 100644 index be99dd2d..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to analyse.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to analyse.""" - - -JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py deleted file mode 100644 index 9aec8eb2..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -QueryParamStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current job state to filter on. When set, the other results are not displayed.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): - page: NotRequired[int] - r"""The page number of the results to be returned.""" - page_size: NotRequired[int] - r"""The number of items to return per page.""" - model: NotRequired[Nullable[str]] - r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - created_after: NotRequired[Nullable[datetime]] - r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - created_before: NotRequired[Nullable[datetime]] - created_by_me: NotRequired[bool] - r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[QueryParamStatus]] - r"""The current job state to filter on. When set, the other results are not displayed.""" - wandb_project: NotRequired[Nullable[str]] - r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - wandb_name: NotRequired[Nullable[str]] - r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - suffix: NotRequired[Nullable[str]] - r"""The model suffix to filter on. 
When set, the other results are not displayed.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - r"""The page number of the results to be returned.""" - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - r"""The number of items to return per page.""" - - model: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - - created_after: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - - created_before: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_by_me: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = False - r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - - status: Annotated[ - OptionalNullable[QueryParamStatus], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The current job state to filter on. When set, the other results are not displayed.""" - - wandb_project: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - - wandb_name: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The Weight and Biases run name to filter on. 
When set, the other results are not displayed.""" - - suffix: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The model suffix to filter on. When set, the other results are not displayed.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "created_after", - "created_before", - "created_by_me", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - nullable_fields = [ - "model", - "created_after", - "created_before", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py deleted file mode 100644 index 8103b67b..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - -JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py deleted file mode 100644 index a10528ca..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict -from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict -from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import ( - FieldMetadata, - PathParamMetadata, - RequestMetadata, - get_discriminator, -) -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to update.""" - update_ft_model_in: UpdateFTModelInTypedDict - - -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to update.""" - - update_ft_model_in: Annotated[ - UpdateFTModelIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] - - -JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", - Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ - Union[ - Annotated[ClassifierFTModelOut, Tag("classifier")], - Annotated[CompletionFTModelOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py deleted file mode 100644 index abdf18fd..00000000 --- a/src/mistralai/models/jobsout.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -JobsOutDataTypedDict = TypeAliasType( - "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -JobsOutData = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] - - -JobsOutObject = Literal["list"] - - -class JobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[JobsOutDataTypedDict]] - object: NotRequired[JobsOutObject] - - -class JobsOut(BaseModel): - total: int - - data: Optional[List[JobsOutData]] = None - - object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py deleted file mode 100644 index e2b6a45e..00000000 --- a/src/mistralai/models/jsonschema.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py deleted file mode 100644 index d878173b..00000000 --- a/src/mistralai/models/legacyjobmetadataout.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -LegacyJobMetadataOutObject = Literal["job.metadata"] - - -class LegacyJobMetadataOutTypedDict(TypedDict): - details: str - expected_duration_seconds: NotRequired[Nullable[int]] - r"""The approximated time (in seconds) for the fine-tuning process to complete.""" - cost: NotRequired[Nullable[float]] - r"""The cost of the fine-tuning job.""" - cost_currency: NotRequired[Nullable[str]] - r"""The currency used for the fine-tuning job cost.""" - train_tokens_per_step: NotRequired[Nullable[int]] - r"""The number of tokens consumed by one training step.""" - train_tokens: NotRequired[Nullable[int]] - r"""The total number of tokens used during the fine-tuning process.""" - data_tokens: NotRequired[Nullable[int]] - r"""The total number of tokens in the training dataset.""" - estimated_start_time: NotRequired[Nullable[int]] - deprecated: NotRequired[bool] - epochs: NotRequired[Nullable[float]] - r"""The number of complete passes through the entire training dataset.""" - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - object: NotRequired[LegacyJobMetadataOutObject] - - -class LegacyJobMetadataOut(BaseModel): - details: str - - expected_duration_seconds: OptionalNullable[int] = UNSET - r"""The approximated time (in seconds) for the fine-tuning process to complete.""" - - cost: OptionalNullable[float] = UNSET - r"""The cost of the fine-tuning job.""" - - cost_currency: OptionalNullable[str] = UNSET - r"""The currency used for the fine-tuning job cost.""" - - train_tokens_per_step: OptionalNullable[int] = UNSET - r"""The number of tokens consumed by one training step.""" - - train_tokens: OptionalNullable[int] = UNSET - r"""The total number of tokens used during the fine-tuning process.""" - - data_tokens: OptionalNullable[int] = UNSET - r"""The total number of tokens in the training dataset.""" - - estimated_start_time: OptionalNullable[int] = UNSET - - deprecated: Optional[bool] = True - - epochs: OptionalNullable[float] = UNSET - r"""The number of complete passes through the entire training dataset.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - - object: Optional[LegacyJobMetadataOutObject] = "job.metadata" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "deprecated", - "epochs", - "training_steps", - "object", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "epochs", - "training_steps", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraries_delete_v1op.py b/src/mistralai/models/libraries_delete_v1op.py deleted file mode 100644 index 56f8f8a8..00000000 --- a/src/mistralai/models/libraries_delete_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDeleteV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_list_v1op.py b/src/mistralai/models/libraries_documents_list_v1op.py deleted file mode 100644 index 04a3ed25..00000000 --- a/src/mistralai/models/libraries_documents_list_v1op.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class LibrariesDocumentsListV1RequestTypedDict(TypedDict): - library_id: str - search: NotRequired[Nullable[str]] - page_size: NotRequired[int] - page: NotRequired[int] - sort_by: NotRequired[str] - sort_order: NotRequired[str] - - -class LibrariesDocumentsListV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - search: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - sort_by: Annotated[ - Optional[str], - 
FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = "created_at" - - sort_order: Annotated[ - Optional[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = "desc" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["search", "page_size", "page", "sort_by", "sort_order"] - nullable_fields = ["search"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraries_documents_update_v1op.py b/src/mistralai/models/libraries_documents_update_v1op.py deleted file mode 100644 index 5551d5ee..00000000 --- a/src/mistralai/models/libraries_documents_update_v1op.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - document_update_in: DocumentUpdateInTypedDict - - -class LibrariesDocumentsUpdateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_update_in: Annotated[ - DocumentUpdateIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraries_get_v1op.py b/src/mistralai/models/libraries_get_v1op.py deleted file mode 100644 index b87090f6..00000000 --- a/src/mistralai/models/libraries_get_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesGetV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesGetV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_share_list_v1op.py b/src/mistralai/models/libraries_share_list_v1op.py deleted file mode 100644 index b276d756..00000000 --- a/src/mistralai/models/libraries_share_list_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareListV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesShareListV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_update_v1op.py b/src/mistralai/models/libraries_update_v1op.py deleted file mode 100644 index c93895d9..00000000 --- a/src/mistralai/models/libraries_update_v1op.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesUpdateV1RequestTypedDict(TypedDict): - library_id: str - library_in_update: LibraryInUpdateTypedDict - - -class LibrariesUpdateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - library_in_update: Annotated[ - LibraryInUpdate, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraryin.py b/src/mistralai/models/libraryin.py deleted file mode 100644 index 872d494d..00000000 --- a/src/mistralai/models/libraryin.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInTypedDict(TypedDict): - name: str - description: NotRequired[Nullable[str]] - chunk_size: NotRequired[Nullable[int]] - - -class LibraryIn(BaseModel): - name: str - - description: OptionalNullable[str] = UNSET - - chunk_size: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "chunk_size"] - nullable_fields = ["description", "chunk_size"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraryinupdate.py b/src/mistralai/models/libraryinupdate.py deleted file mode 100644 index 6e8ab81a..00000000 --- a/src/mistralai/models/libraryinupdate.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInUpdateTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class LibraryInUpdate(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraryout.py b/src/mistralai/models/libraryout.py deleted file mode 100644 index 6a13130d..00000000 --- a/src/mistralai/models/libraryout.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryOutTypedDict(TypedDict): - id: str - name: str - created_at: datetime - updated_at: datetime - owner_id: str - owner_type: str - total_size: int - nb_documents: int - chunk_size: Nullable[int] - emoji: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - generated_name: NotRequired[Nullable[str]] - generated_description: NotRequired[Nullable[str]] - explicit_user_members_count: NotRequired[Nullable[int]] - explicit_workspace_members_count: NotRequired[Nullable[int]] - org_sharing_role: NotRequired[Nullable[str]] - - -class LibraryOut(BaseModel): - id: str - - name: str - - created_at: datetime - - updated_at: datetime - - owner_id: str - - owner_type: str - - total_size: int - - nb_documents: int - - chunk_size: Nullable[int] - - emoji: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - generated_name: OptionalNullable[str] = UNSET - - generated_description: OptionalNullable[str] = UNSET - - explicit_user_members_count: OptionalNullable[int] = UNSET - - explicit_workspace_members_count: OptionalNullable[int] = UNSET - - org_sharing_role: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "emoji", - "description", - "generated_name", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - ] - nullable_fields = [ - "chunk_size", - "emoji", - "description", - "generated_name", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in 
type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/listdocumentout.py b/src/mistralai/models/listdocumentout.py deleted file mode 100644 index 9d39e087..00000000 --- a/src/mistralai/models/listdocumentout.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documentout import DocumentOut, DocumentOutTypedDict -from .paginationinfo import PaginationInfo, PaginationInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListDocumentOutTypedDict(TypedDict): - pagination: PaginationInfoTypedDict - data: List[DocumentOutTypedDict] - - -class ListDocumentOut(BaseModel): - pagination: PaginationInfo - - data: List[DocumentOut] diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py deleted file mode 100644 index b032f632..00000000 --- a/src/mistralai/models/listfilesout.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .fileschema import FileSchema, FileSchemaTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListFilesOutTypedDict(TypedDict): - data: List[FileSchemaTypedDict] - object: str - total: int - - -class ListFilesOut(BaseModel): - data: List[FileSchema] - - object: str - - total: int diff --git a/src/mistralai/models/listlibraryout.py b/src/mistralai/models/listlibraryout.py deleted file mode 100644 index 1e647fe1..00000000 --- a/src/mistralai/models/listlibraryout.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .libraryout import LibraryOut, LibraryOutTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListLibraryOutTypedDict(TypedDict): - data: List[LibraryOutTypedDict] - - -class ListLibraryOut(BaseModel): - data: List[LibraryOut] diff --git a/src/mistralai/models/messageinputcontentchunks.py b/src/mistralai/models/messageinputcontentchunks.py deleted file mode 100644 index 47704211..00000000 --- a/src/mistralai/models/messageinputcontentchunks.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageInputContentChunksTypedDict = TypeAliasType( - "MessageInputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ToolFileChunkTypedDict, - ], -) - - -MessageInputContentChunks = TypeAliasType( - "MessageInputContentChunks", - Union[TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk], -) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py deleted file mode 100644 index c14ad5ae..00000000 --- a/src/mistralai/models/messageinputentry.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .messageinputcontentchunks import ( - MessageInputContentChunks, - MessageInputContentChunksTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -Object = Literal["entry"] - -MessageInputEntryType = Literal["message.input"] - -MessageInputEntryRole = Literal["assistant", "user"] - -MessageInputEntryContentTypedDict = TypeAliasType( - "MessageInputEntryContentTypedDict", - Union[str, List[MessageInputContentChunksTypedDict]], -) - - -MessageInputEntryContent = TypeAliasType( - "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] -) - - -class MessageInputEntryTypedDict(TypedDict): - r"""Representation of an input message inside the conversation.""" - - role: MessageInputEntryRole - content: MessageInputEntryContentTypedDict - object: NotRequired[Object] - type: NotRequired[MessageInputEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - prefix: NotRequired[bool] - - -class MessageInputEntry(BaseModel): - r"""Representation of an input message inside the conversation.""" - - role: MessageInputEntryRole - - content: MessageInputEntryContent - - object: Optional[Object] = "entry" - - type: Optional[MessageInputEntryType] = "message.input" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - prefix: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "prefix", - ] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in 
type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py deleted file mode 100644 index e83fb3a9..00000000 --- a/src/mistralai/models/messageoutputcontentchunks.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageOutputContentChunksTypedDict = TypeAliasType( - "MessageOutputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ToolFileChunkTypedDict, - ToolReferenceChunkTypedDict, - ], -) - - -MessageOutputContentChunks = TypeAliasType( - "MessageOutputContentChunks", - Union[ - TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk - ], -) diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py deleted file mode 100644 index 1c2e4107..00000000 --- a/src/mistralai/models/messageoutputentry.py +++ /dev/null @@ -1,100 +0,0 @@ -"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageoutputcontentchunks import ( - MessageOutputContentChunks, - MessageOutputContentChunksTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEntryObject = Literal["entry"] - -MessageOutputEntryType = Literal["message.output"] - -MessageOutputEntryRole = Literal["assistant"] - -MessageOutputEntryContentTypedDict = TypeAliasType( - "MessageOutputEntryContentTypedDict", - Union[str, List[MessageOutputContentChunksTypedDict]], -) - - -MessageOutputEntryContent = TypeAliasType( - "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] -) - - -class MessageOutputEntryTypedDict(TypedDict): - content: MessageOutputEntryContentTypedDict - object: NotRequired[MessageOutputEntryObject] - type: NotRequired[MessageOutputEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - agent_id: NotRequired[Nullable[str]] - model: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEntryRole] - - -class MessageOutputEntry(BaseModel): - content: MessageOutputEntryContent - - object: Optional[MessageOutputEntryObject] = "entry" - - type: Optional[MessageOutputEntryType] = "message.output" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - agent_id: OptionalNullable[str] = UNSET - - model: OptionalNullable[str] = UNSET - - role: Optional[MessageOutputEntryRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", 
- "agent_id", - "model", - "role", - ] - nullable_fields = ["completed_at", "agent_id", "model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py deleted file mode 100644 index 474cb081..00000000 --- a/src/mistralai/models/messageoutputevent.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEventType = Literal["message.output.delta"] - -MessageOutputEventRole = Literal["assistant"] - -MessageOutputEventContentTypedDict = TypeAliasType( - "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] -) - - -MessageOutputEventContent = TypeAliasType( - "MessageOutputEventContent", Union[str, OutputContentChunks] -) - - -class MessageOutputEventTypedDict(TypedDict): - id: str - content: MessageOutputEventContentTypedDict - type: NotRequired[MessageOutputEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - 
content_index: NotRequired[int] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEventRole] - - -class MessageOutputEvent(BaseModel): - id: str - - content: MessageOutputEventContent - - type: Optional[MessageOutputEventType] = "message.output.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 - - content_index: Optional[int] = 0 - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - role: Optional[MessageOutputEventRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "type", - "created_at", - "output_index", - "content_index", - "model", - "agent_id", - "role", - ] - nullable_fields = ["model", "agent_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py deleted file mode 100644 index 930b5c21..00000000 --- a/src/mistralai/models/metricout.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class MetricOutTypedDict(TypedDict): - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - train_loss: NotRequired[Nullable[float]] - valid_loss: NotRequired[Nullable[float]] - valid_mean_token_accuracy: NotRequired[Nullable[float]] - - -class MetricOut(BaseModel): - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - train_loss: OptionalNullable[float] = UNSET - - valid_loss: OptionalNullable[float] = UNSET - - valid_mean_token_accuracy: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py deleted file mode 100644 index 0ffd6787..00000000 --- a/src/mistralai/models/mistralpromptmode.py +++ /dev/null @@ 
-1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py deleted file mode 100644 index 54c5f2a2..00000000 --- a/src/mistralai/models/modelcapabilities.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ModelCapabilitiesTypedDict(TypedDict): - completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] - function_calling: NotRequired[bool] - fine_tuning: NotRequired[bool] - vision: NotRequired[bool] - classification: NotRequired[bool] - - -class ModelCapabilities(BaseModel): - completion_chat: Optional[bool] = True - - completion_fim: Optional[bool] = False - - function_calling: Optional[bool] = True - - fine_tuning: Optional[bool] = False - - vision: Optional[bool] = False - - classification: Optional[bool] = False diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py deleted file mode 100644 index 4ced79ea..00000000 --- a/src/mistralai/models/modelconversation.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ModelConversationToolsTypedDict = TypeAliasType( - "ModelConversationToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -ModelConversationTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -ModelConversationObject = Literal["conversation"] - - -class ModelConversationTypedDict(TypedDict): - id: str - created_at: datetime - updated_at: datetime - model: str - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: 
NotRequired[List[ModelConversationToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - name: NotRequired[Nullable[str]] - r"""Name given to the conversation.""" - description: NotRequired[Nullable[str]] - r"""Description of the what the conversation is about.""" - object: NotRequired[ModelConversationObject] - - -class ModelConversation(BaseModel): - id: str - - created_at: datetime - - updated_at: datetime - - model: str - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[ModelConversationTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - name: OptionalNullable[str] = UNSET - r"""Name given to the conversation.""" - - description: OptionalNullable[str] = UNSET - r"""Description of the what the conversation is about.""" - - object: Optional[ModelConversationObject] = "conversation" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "name", - "description", - "object", - ] - nullable_fields = ["instructions", "name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - 
): - m[k] = val - - return m diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py deleted file mode 100644 index 394cb3fa..00000000 --- a/src/mistralai/models/modellist.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -DataTypedDict = TypeAliasType( - "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] -) - - -Data = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ModelListTypedDict(TypedDict): - object: NotRequired[str] - data: NotRequired[List[DataTypedDict]] - - -class ModelList(BaseModel): - object: Optional[str] = "list" - - data: Optional[List[Data]] = None diff --git a/src/mistralai/models/moderationobject.py b/src/mistralai/models/moderationobject.py deleted file mode 100644 index 5eff2d2a..00000000 --- a/src/mistralai/models/moderationobject.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class ModerationObjectTypedDict(TypedDict): - categories: NotRequired[Dict[str, bool]] - r"""Moderation result thresholds""" - category_scores: NotRequired[Dict[str, float]] - r"""Moderation result""" - - -class ModerationObject(BaseModel): - categories: Optional[Dict[str, bool]] = None - r"""Moderation result thresholds""" - - category_scores: Optional[Dict[str, float]] = None - r"""Moderation result""" diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py deleted file mode 100644 index cec0acf4..00000000 --- a/src/mistralai/models/ocrimageobject.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class OCRImageObjectTypedDict(TypedDict): - id: str - r"""Image ID for extracted image in a page""" - top_left_x: Nullable[int] - r"""X coordinate of top-left corner of the extracted image""" - top_left_y: Nullable[int] - r"""Y coordinate of top-left corner of the extracted image""" - bottom_right_x: Nullable[int] - r"""X coordinate of bottom-right corner of the extracted image""" - bottom_right_y: Nullable[int] - r"""Y coordinate of bottom-right corner of the extracted image""" - image_base64: NotRequired[Nullable[str]] - r"""Base64 string of the extracted image""" - image_annotation: NotRequired[Nullable[str]] - r"""Annotation of the extracted image in json str""" - - -class OCRImageObject(BaseModel): - id: str - r"""Image ID for extracted image in a page""" - - top_left_x: Nullable[int] - r"""X coordinate of top-left corner of 
the extracted image""" - - top_left_y: Nullable[int] - r"""Y coordinate of top-left corner of the extracted image""" - - bottom_right_x: Nullable[int] - r"""X coordinate of bottom-right corner of the extracted image""" - - bottom_right_y: Nullable[int] - r"""Y coordinate of bottom-right corner of the extracted image""" - - image_base64: OptionalNullable[str] = UNSET - r"""Base64 string of the extracted image""" - - image_annotation: OptionalNullable[str] = UNSET - r"""Annotation of the extracted image in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrpagedimensions.py b/src/mistralai/models/ocrpagedimensions.py deleted file mode 100644 index d1aeb54d..00000000 --- a/src/mistralai/models/ocrpagedimensions.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class OCRPageDimensionsTypedDict(TypedDict): - dpi: int - r"""Dots per inch of the page-image""" - height: int - r"""Height of the image in pixels""" - width: int - r"""Width of the image in pixels""" - - -class OCRPageDimensions(BaseModel): - dpi: int - r"""Dots per inch of the page-image""" - - height: int - r"""Height of the image in pixels""" - - width: int - r"""Width of the image in pixels""" diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py deleted file mode 100644 index 94624a16..00000000 --- a/src/mistralai/models/ocrpageobject.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict -from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List -from typing_extensions import TypedDict - - -class OCRPageObjectTypedDict(TypedDict): - index: int - r"""The page index in a pdf document starting from 0""" - markdown: str - r"""The markdown string response of the page""" - images: List[OCRImageObjectTypedDict] - r"""List of all extracted images in the page""" - dimensions: Nullable[OCRPageDimensionsTypedDict] - r"""The dimensions of the PDF Page's screenshot image""" - - -class OCRPageObject(BaseModel): - index: int - r"""The page index in a pdf document starting from 0""" - - markdown: str - r"""The markdown string response of the page""" - - images: List[OCRImageObject] - r"""List of all extracted images in the page""" - - dimensions: Nullable[OCRPageDimensions] - r"""The dimensions of the PDF Page's screenshot image""" - - 
@model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["dimensions"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py deleted file mode 100644 index df932c2a..00000000 --- a/src/mistralai/models/ocrrequest.py +++ /dev/null @@ -1,114 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .filechunk import FileChunk, FileChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", - Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], -) -r"""Document to run OCR on""" - - -Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) -r"""Document to run OCR on""" - - -class OCRRequestTypedDict(TypedDict): - model: Nullable[str] - document: DocumentTypedDict - r"""Document to run OCR on""" - id: NotRequired[str] - pages: NotRequired[Nullable[List[int]]] - r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" - include_image_base64: NotRequired[Nullable[bool]] - r"""Include image URLs in response""" - image_limit: NotRequired[Nullable[int]] - r"""Max images to extract""" - image_min_size: NotRequired[Nullable[int]] - r"""Minimum height and width of image to extract""" - bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" - - -class OCRRequest(BaseModel): - model: Nullable[str] - - document: Document - r"""Document to run OCR on""" - - id: Optional[str] = None - - pages: OptionalNullable[List[int]] = UNSET - r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" - - include_image_base64: OptionalNullable[bool] = UNSET - r"""Include image URLs in response""" - - image_limit: OptionalNullable[int] = UNSET - r"""Max images to extract""" - - image_min_size: OptionalNullable[int] = UNSET - r"""Minimum height and width of image to extract""" - - bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - - document_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py deleted file mode 100644 index 7b65bee7..00000000 --- a/src/mistralai/models/ocrresponse.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict -from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List -from typing_extensions import NotRequired, TypedDict - - -class OCRResponseTypedDict(TypedDict): - pages: List[OCRPageObjectTypedDict] - r"""List of OCR info for pages.""" - model: str - r"""The model used to generate the OCR.""" - usage_info: OCRUsageInfoTypedDict - document_annotation: NotRequired[Nullable[str]] - r"""Formatted response in the request_format if provided in json str""" - - -class OCRResponse(BaseModel): - pages: List[OCRPageObject] - r"""List of OCR info for pages.""" - - model: str - r"""The model used to generate the OCR.""" - - usage_info: OCRUsageInfo - - document_annotation: OptionalNullable[str] = UNSET - r"""Formatted response in the request_format if provided in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrusageinfo.py b/src/mistralai/models/ocrusageinfo.py deleted file mode 100644 index 36c9f826..00000000 --- a/src/mistralai/models/ocrusageinfo.py +++ /dev/null @@ -1,51 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class OCRUsageInfoTypedDict(TypedDict): - pages_processed: int - r"""Number of pages processed""" - doc_size_bytes: NotRequired[Nullable[int]] - r"""Document size in bytes""" - - -class OCRUsageInfo(BaseModel): - pages_processed: int - r"""Number of pages processed""" - - doc_size_bytes: OptionalNullable[int] = UNSET - r"""Document size in bytes""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py deleted file mode 100644 index 6b7e39ea..00000000 --- a/src/mistralai/models/outputcontentchunks.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -OutputContentChunksTypedDict = TypeAliasType( - "OutputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ToolFileChunkTypedDict, - ToolReferenceChunkTypedDict, - ], -) - - -OutputContentChunks = TypeAliasType( - "OutputContentChunks", - Union[ - TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk - ], -) diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py deleted file mode 100644 index 7937c9d1..00000000 --- a/src/mistralai/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py deleted file mode 100644 index 4a5503f2..00000000 --- a/src/mistralai/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference"] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py deleted file mode 100644 index 296cb430..00000000 --- a/src/mistralai/models/responsedoneevent.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseDoneEventType = Literal["conversation.response.done"] - - -class ResponseDoneEventTypedDict(TypedDict): - usage: ConversationUsageInfoTypedDict - type: NotRequired[ResponseDoneEventType] - created_at: NotRequired[datetime] - - -class ResponseDoneEvent(BaseModel): - usage: ConversationUsageInfo - - type: Optional[ResponseDoneEventType] = "conversation.response.done" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py deleted file mode 100644 index e4190d17..00000000 --- a/src/mistralai/models/responseerrorevent.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseErrorEventType = Literal["conversation.response.error"] - - -class ResponseErrorEventTypedDict(TypedDict): - message: str - code: int - type: NotRequired[ResponseErrorEventType] - created_at: NotRequired[datetime] - - -class ResponseErrorEvent(BaseModel): - message: str - - code: int - - type: Optional[ResponseErrorEventType] = "conversation.response.error" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py deleted file mode 100644 index c9319989..00000000 --- a/src/mistralai/models/responseformat.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .responseformats import ResponseFormats -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ResponseFormatTypedDict(TypedDict): - type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] - - -class ResponseFormat(BaseModel): - type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py deleted file mode 100644 index 08c39951..00000000 --- a/src/mistralai/models/responseformats.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py deleted file mode 100644 index 6acb483e..00000000 --- a/src/mistralai/models/responsestartedevent.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseStartedEventType = Literal["conversation.response.started"] - - -class ResponseStartedEventTypedDict(TypedDict): - conversation_id: str - type: NotRequired[ResponseStartedEventType] - created_at: NotRequired[datetime] - - -class ResponseStartedEvent(BaseModel): - conversation_id: str - - type: Optional[ResponseStartedEventType] = "conversation.response.started" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py deleted file mode 100644 index bfe62474..00000000 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to retrieve.""" - - -class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to retrieve.""" - - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - Union[BaseModelCardTypedDict, FTModelCardTypedDict], -) -r"""Successful Response""" - - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] -r"""Successful Response""" diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py deleted file mode 100644 index 7d734b0f..00000000 --- a/src/mistralai/models/retrievefileout.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, NotRequired, TypedDict - - -class RetrieveFileOutTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - deleted: bool - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class RetrieveFileOut(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - - sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - - source: Annotated[Source, PlainValidator(validate_open_enum(False))] - - deleted: bool - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", 
"signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py deleted file mode 100644 index adc90ec7..00000000 --- a/src/mistralai/models/sampletype.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -SampleType = Union[ - Literal["pretrain", "instruct", "batch_request", "batch_result", "batch_error"], - UnrecognizedStr, -] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py deleted file mode 100644 index 03216cbf..00000000 --- a/src/mistralai/models/sdkerror.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from dataclasses import dataclass -from typing import Optional -import httpx - - -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" - - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None - - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" - - return f"{self.message}: Status {self.status_code}{body}" diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py deleted file mode 100644 index cf05ba8f..00000000 --- a/src/mistralai/models/security.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, SecurityMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class SecurityTypedDict(TypedDict): - api_key: NotRequired[str] - - -class Security(BaseModel): - api_key: Annotated[ - Optional[str], - FieldMetadata( - security=SecurityMetadata( - scheme=True, - scheme_type="http", - sub_type="bearer", - field_name="Authorization", - ) - ), - ] = None diff --git a/src/mistralai/models/shareenum.py b/src/mistralai/models/shareenum.py deleted file mode 100644 index c2945514..00000000 --- a/src/mistralai/models/shareenum.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -ShareEnum = Union[Literal["Viewer", "Editor"], UnrecognizedStr] diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py deleted file mode 100644 index b9df5f9d..00000000 --- a/src/mistralai/models/sharingdelete.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .entitytype import EntityType -from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict - - -class SharingDeleteTypedDict(TypedDict): - org_id: str - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - - -class SharingDelete(BaseModel): - org_id: str - - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - - share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] - r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py deleted file mode 100644 index af20fd14..00000000 --- a/src/mistralai/models/sharingin.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .entitytype import EntityType -from .shareenum import ShareEnum -from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict - - -class SharingInTypedDict(TypedDict): - org_id: str - level: ShareEnum - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - - -class SharingIn(BaseModel): - org_id: str - - level: Annotated[ShareEnum, PlainValidator(validate_open_enum(False))] - - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - - share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] - r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/sharingout.py b/src/mistralai/models/sharingout.py deleted file mode 100644 index a78a7764..00000000 --- a/src/mistralai/models/sharingout.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingOutTypedDict(TypedDict): - library_id: str - org_id: str - role: str - share_with_type: str - share_with_uuid: str - user_id: NotRequired[Nullable[str]] - - -class SharingOut(BaseModel): - library_id: str - - org_id: str - - role: str - - share_with_type: str - - share_with_uuid: str - - user_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["user_id"] - nullable_fields = ["user_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py deleted file mode 100644 index c21550f2..00000000 --- a/src/mistralai/models/source.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -Source = Union[Literal["upload", "repository", "mistral"], UnrecognizedStr] diff --git a/src/mistralai/models/ssetypes.py b/src/mistralai/models/ssetypes.py deleted file mode 100644 index 796f0327..00000000 --- a/src/mistralai/models/ssetypes.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -SSETypes = Literal[ - "conversation.response.started", - "conversation.response.done", - "conversation.response.error", - "message.output.delta", - "tool.execution.started", - "tool.execution.delta", - "tool.execution.done", - "agent.handoff.started", - "agent.handoff.done", - "function.call.delta", -] -r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py deleted file mode 100644 index 7827ac4b..00000000 --- a/src/mistralai/models/systemmessage.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] -) - - -SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] -) - - -Role = Literal["system"] - - -class SystemMessageTypedDict(TypedDict): - content: SystemMessageContentTypedDict - role: NotRequired[Role] - - -class SystemMessage(BaseModel): - content: SystemMessageContent - - role: Optional[Role] = "system" diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py deleted file mode 100644 index 02b115f6..00000000 --- a/src/mistralai/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TextChunkType = Literal["text"] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[TextChunkType] - - -class TextChunk(BaseModel): - text: str - - type: Optional[TextChunkType] = "text" diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py deleted file mode 100644 index 24b466f9..00000000 --- a/src/mistralai/models/thinkchunk.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ThinkingTypedDict = TypeAliasType( - "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] -) - - -Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) - - -ThinkChunkType = Literal["thinking"] - - -class ThinkChunkTypedDict(TypedDict): - thinking: List[ThinkingTypedDict] - closed: NotRequired[bool] - r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] - - -class ThinkChunk(BaseModel): - thinking: List[Thinking] - - closed: Optional[bool] = None - r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py deleted file mode 100644 index dd1b6446..00000000 --- a/src/mistralai/models/timestampgranularity.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -TimestampGranularity = Literal["segment"] diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py deleted file mode 100644 index 6e746df3..00000000 --- a/src/mistralai/models/tool.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[ToolTypes] - - -class Tool(BaseModel): - function: Function - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py deleted file mode 100644 index 7d3a3c6b..00000000 --- a/src/mistralai/models/toolcall.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .functioncall import FunctionCall, FunctionCallTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class MetadataTypedDict(TypedDict): - pass - - -class Metadata(BaseModel): - pass - - -class ToolCallTypedDict(TypedDict): - function: FunctionCallTypedDict - id: NotRequired[str] - type: NotRequired[ToolTypes] - index: NotRequired[int] - metadata: NotRequired[Nullable[MetadataTypedDict]] - - -class ToolCall(BaseModel): - function: FunctionCall - - id: Optional[str] = "null" - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) - - index: Optional[int] = 0 - - metadata: OptionalNullable[Metadata] = UNSET - - 
@model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["id", "type", "index", "metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py deleted file mode 100644 index 3b7d60e0..00000000 --- a/src/mistralai/models/toolchoice.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functionname import FunctionName, FunctionNameTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ToolChoiceTypedDict(TypedDict): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionNameTypedDict - r"""this restriction of `Function` is used to select a specific function to call""" - type: NotRequired[ToolTypes] - - -class ToolChoice(BaseModel): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionName - r"""this restriction of `Function` is used to select a specific function to call""" - - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) diff --git a/src/mistralai/models/toolchoiceenum.py b/src/mistralai/models/toolchoiceenum.py deleted file mode 100644 index 8e6a6ad8..00000000 --- a/src/mistralai/models/toolchoiceenum.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ToolChoiceEnum = Literal["auto", "none", "any", "required"] diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py deleted file mode 100644 index 99b97e68..00000000 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ToolExecutionDeltaEventType = Literal["tool.execution.delta"] - - -class ToolExecutionDeltaEventTypedDict(TypedDict): - id: str - name: BuiltInConnectors - arguments: str - type: NotRequired[ToolExecutionDeltaEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class ToolExecutionDeltaEvent(BaseModel): - id: str - - name: BuiltInConnectors - - arguments: str - - type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py deleted file mode 100644 index c73d943a..00000000 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ToolExecutionDoneEventType = Literal["tool.execution.done"] - - -class ToolExecutionDoneEventTypedDict(TypedDict): - id: str - name: BuiltInConnectors - type: NotRequired[ToolExecutionDoneEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - info: NotRequired[Dict[str, Any]] - - -class ToolExecutionDoneEvent(BaseModel): - id: str - - name: BuiltInConnectors - - type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 - - info: Optional[Dict[str, Any]] = None diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py deleted file mode 100644 index db503ea8..00000000 --- a/src/mistralai/models/toolexecutionentry.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ToolExecutionEntryObject = Literal["entry"] - -ToolExecutionEntryType = Literal["tool.execution"] - - -class ToolExecutionEntryTypedDict(TypedDict): - name: BuiltInConnectors - arguments: str - object: NotRequired[ToolExecutionEntryObject] - type: NotRequired[ToolExecutionEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - info: NotRequired[Dict[str, Any]] - - -class ToolExecutionEntry(BaseModel): - name: BuiltInConnectors - - arguments: str - - object: Optional[ToolExecutionEntryObject] = "entry" - - type: Optional[ToolExecutionEntryType] = "tool.execution" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - info: Optional[Dict[str, Any]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolexecutionstartedevent.py 
b/src/mistralai/models/toolexecutionstartedevent.py deleted file mode 100644 index 7a54058f..00000000 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ToolExecutionStartedEventType = Literal["tool.execution.started"] - - -class ToolExecutionStartedEventTypedDict(TypedDict): - id: str - name: BuiltInConnectors - arguments: str - type: NotRequired[ToolExecutionStartedEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class ToolExecutionStartedEvent(BaseModel): - id: str - - name: BuiltInConnectors - - arguments: str - - type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py deleted file mode 100644 index 77c07d6d..00000000 --- a/src/mistralai/models/toolfilechunk.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ToolFileChunkType = Literal["tool_file"] - - -class ToolFileChunkTypedDict(TypedDict): - tool: BuiltInConnectors - file_id: str - type: NotRequired[ToolFileChunkType] - file_name: NotRequired[Nullable[str]] - file_type: NotRequired[Nullable[str]] - - -class ToolFileChunk(BaseModel): - tool: BuiltInConnectors - - file_id: str - - type: Optional[ToolFileChunkType] = "tool_file" - - file_name: OptionalNullable[str] = UNSET - - file_type: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "file_name", "file_type"] - nullable_fields = ["file_name", "file_type"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py deleted file mode 100644 index 82f62e0f..00000000 --- a/src/mistralai/models/toolmessage.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolMessageContentTypedDict = TypeAliasType( - "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) - - -ToolMessageRole = Literal["tool"] - - -class ToolMessageTypedDict(TypedDict): - content: Nullable[ToolMessageContentTypedDict] - tool_call_id: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] - - -class ToolMessage(BaseModel): - content: Nullable[ToolMessageContent] - - tool_call_id: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - role: Optional[ToolMessageRole] = "tool" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py deleted file mode 100644 index e50b8451..00000000 --- a/src/mistralai/models/toolreferencechunk.py +++ 
/dev/null @@ -1,64 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ToolReferenceChunkType = Literal["tool_reference"] - - -class ToolReferenceChunkTypedDict(TypedDict): - tool: BuiltInConnectors - title: str - type: NotRequired[ToolReferenceChunkType] - url: NotRequired[Nullable[str]] - favicon: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class ToolReferenceChunk(BaseModel): - tool: BuiltInConnectors - - title: str - - type: Optional[ToolReferenceChunkType] = "tool_reference" - - url: OptionalNullable[str] = UNSET - - favicon: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "url", "favicon", "description"] - nullable_fields = ["url", "favicon", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/tooltypes.py b/src/mistralai/models/tooltypes.py deleted file mode 100644 index fb581820..00000000 --- a/src/mistralai/models/tooltypes.py +++ /dev/null @@ -1,8 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -ToolTypes = Union[Literal["function"], UnrecognizedStr] diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py deleted file mode 100644 index 99bd49dd..00000000 --- a/src/mistralai/models/trainingfile.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class TrainingFileTypedDict(TypedDict): - file_id: str - weight: NotRequired[float] - - -class TrainingFile(BaseModel): - file_id: str - - weight: Optional[float] = 1 diff --git a/src/mistralai/models/transcriptionresponse.py b/src/mistralai/models/transcriptionresponse.py deleted file mode 100644 index 54a98a5b..00000000 --- a/src/mistralai/models/transcriptionresponse.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .transcriptionsegmentchunk import ( - TranscriptionSegmentChunk, - TranscriptionSegmentChunkTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, List, Optional -from typing_extensions import NotRequired, TypedDict - - -class TranscriptionResponseTypedDict(TypedDict): - model: str - text: str - usage: UsageInfoTypedDict - language: Nullable[str] - segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] - - -class TranscriptionResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - model: str - - text: str - - usage: UsageInfo - - language: Nullable[str] - - segments: Optional[List[TranscriptionSegmentChunk]] = None - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["segments"] - nullable_fields = ["language"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - 
- return m diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py deleted file mode 100644 index 53f1b397..00000000 --- a/src/mistralai/models/transcriptionsegmentchunk.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["transcription_segment"] - - -class TranscriptionSegmentChunkTypedDict(TypedDict): - text: str - start: float - end: float - type: NotRequired[Type] - - -class TranscriptionSegmentChunk(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - text: str - - start: float - - end: float - - type: Optional[Type] = "transcription_segment" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py deleted file mode 100644 index ffd0e080..00000000 --- a/src/mistralai/models/transcriptionstreamdone.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .transcriptionsegmentchunk import ( - TranscriptionSegmentChunk, - TranscriptionSegmentChunkTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamDoneType = Literal["transcription.done"] - - -class TranscriptionStreamDoneTypedDict(TypedDict): - model: str - text: str - usage: UsageInfoTypedDict - language: Nullable[str] - segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] - type: NotRequired[TranscriptionStreamDoneType] - - -class TranscriptionStreamDone(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - model: str - - text: str - - usage: UsageInfo - - language: Nullable[str] - - segments: Optional[List[TranscriptionSegmentChunk]] = None - - type: Optional[TranscriptionStreamDoneType] = "transcription.done" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["segments", "type"] - nullable_fields = ["language"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not 
None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - - return m diff --git a/src/mistralai/models/transcriptionstreamevents.py b/src/mistralai/models/transcriptionstreamevents.py deleted file mode 100644 index 8207c03f..00000000 --- a/src/mistralai/models/transcriptionstreamevents.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .transcriptionstreamdone import ( - TranscriptionStreamDone, - TranscriptionStreamDoneTypedDict, -) -from .transcriptionstreameventtypes import TranscriptionStreamEventTypes -from .transcriptionstreamlanguage import ( - TranscriptionStreamLanguage, - TranscriptionStreamLanguageTypedDict, -) -from .transcriptionstreamsegmentdelta import ( - TranscriptionStreamSegmentDelta, - TranscriptionStreamSegmentDeltaTypedDict, -) -from .transcriptionstreamtextdelta import ( - TranscriptionStreamTextDelta, - TranscriptionStreamTextDeltaTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TranscriptionStreamEventsDataTypedDict = TypeAliasType( - "TranscriptionStreamEventsDataTypedDict", - Union[ - TranscriptionStreamTextDeltaTypedDict, - TranscriptionStreamLanguageTypedDict, - TranscriptionStreamSegmentDeltaTypedDict, - TranscriptionStreamDoneTypedDict, - ], -) - - -TranscriptionStreamEventsData = Annotated[ - Union[ - Annotated[TranscriptionStreamDone, Tag("transcription.done")], - Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], - Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], - Annotated[TranscriptionStreamTextDelta, 
Tag("transcription.text.delta")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class TranscriptionStreamEventsTypedDict(TypedDict): - event: TranscriptionStreamEventTypes - data: TranscriptionStreamEventsDataTypedDict - - -class TranscriptionStreamEvents(BaseModel): - event: TranscriptionStreamEventTypes - - data: TranscriptionStreamEventsData diff --git a/src/mistralai/models/transcriptionstreameventtypes.py b/src/mistralai/models/transcriptionstreameventtypes.py deleted file mode 100644 index 4a910f0a..00000000 --- a/src/mistralai/models/transcriptionstreameventtypes.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -TranscriptionStreamEventTypes = Literal[ - "transcription.language", - "transcription.segment", - "transcription.text.delta", - "transcription.done", -] diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py deleted file mode 100644 index 8fc2aa6e..00000000 --- a/src/mistralai/models/transcriptionstreamlanguage.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamLanguageType = Literal["transcription.language"] - - -class TranscriptionStreamLanguageTypedDict(TypedDict): - audio_language: str - type: NotRequired[TranscriptionStreamLanguageType] - - -class TranscriptionStreamLanguage(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - audio_language: str - - type: Optional[TranscriptionStreamLanguageType] = "transcription.language" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py deleted file mode 100644 index 61b396b4..00000000 --- a/src/mistralai/models/transcriptionstreamsegmentdelta.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamSegmentDeltaType = Literal["transcription.segment"] - - -class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): - text: str - start: float - end: float - type: NotRequired[TranscriptionStreamSegmentDeltaType] - - -class TranscriptionStreamSegmentDelta(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - text: str - - start: float - - end: float - - type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py deleted file mode 100644 index 8f0b0e59..00000000 --- a/src/mistralai/models/transcriptionstreamtextdelta.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamTextDeltaType = Literal["transcription.text.delta"] - - -class TranscriptionStreamTextDeltaTypedDict(TypedDict): - text: str - type: NotRequired[TranscriptionStreamTextDeltaType] - - -class TranscriptionStreamTextDelta(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - text: str - - type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py deleted file mode 100644 index 6b2f730d..00000000 --- a/src/mistralai/models/unarchiveftmodelout.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -UnarchiveFTModelOutObject = Literal["model"] - - -class UnarchiveFTModelOutTypedDict(TypedDict): - id: str - object: NotRequired[UnarchiveFTModelOutObject] - archived: NotRequired[bool] - - -class UnarchiveFTModelOut(BaseModel): - id: str - - object: Optional[UnarchiveFTModelOutObject] = "model" - - archived: Optional[bool] = False diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py deleted file mode 100644 index 1bd0eaf2..00000000 --- a/src/mistralai/models/updateftmodelin.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class UpdateFTModelInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class UpdateFTModelIn(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in 
optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py deleted file mode 100644 index 8f9f1067..00000000 --- a/src/mistralai/models/uploadfileout.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, NotRequired, TypedDict - - -class UploadFileOutTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class UploadFileOut(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - - sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - - source: Annotated[Source, 
PlainValidator(validate_open_enum(False))] - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py deleted file mode 100644 index cedad5c1..00000000 --- a/src/mistralai/models/usageinfo.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class UsageInfoTypedDict(TypedDict): - prompt_tokens: NotRequired[int] - completion_tokens: NotRequired[int] - total_tokens: NotRequired[int] - prompt_audio_seconds: NotRequired[Nullable[int]] - - -class UsageInfo(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - prompt_tokens: Optional[int] = 0 - - completion_tokens: Optional[int] = 0 - - total_tokens: Optional[int] = 0 - - prompt_audio_seconds: OptionalNullable[int] = UNSET - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - - return m diff --git 
a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py deleted file mode 100644 index 049bc755..00000000 --- a/src/mistralai/models/usermessage.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -UserMessageContentTypedDict = TypeAliasType( - "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) - - -UserMessageRole = Literal["user"] - - -class UserMessageTypedDict(TypedDict): - content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] - - -class UserMessage(BaseModel): - content: Nullable[UserMessageContent] - - role: Optional[UserMessageRole] = "user" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role"] - nullable_fields = ["content"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py deleted file mode 100644 index 
e971e016..00000000 --- a/src/mistralai/models/validationerror.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Union -from typing_extensions import TypeAliasType, TypedDict - - -LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) - - -Loc = TypeAliasType("Loc", Union[str, int]) - - -class ValidationErrorTypedDict(TypedDict): - loc: List[LocTypedDict] - msg: str - type: str - - -class ValidationError(BaseModel): - loc: List[Loc] - - msg: str - - type: str diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py deleted file mode 100644 index 0789b648..00000000 --- a/src/mistralai/models/wandbintegration.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationType = Literal["wandb"] - - -class WandbIntegrationTypedDict(TypedDict): - project: str - r"""The name of the project that the new run will be created under.""" - api_key: str - r"""The WandB API key to use for authentication.""" - type: NotRequired[WandbIntegrationType] - name: NotRequired[Nullable[str]] - r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" - run_name: NotRequired[Nullable[str]] - - -class WandbIntegration(BaseModel): - project: str - r"""The name of the project that the new run will be created under.""" - - api_key: str - r"""The WandB API key to use for authentication.""" - - type: Optional[WandbIntegrationType] = "wandb" - - name: OptionalNullable[str] = UNSET - r"""A display name to set for the run. If not set, will use the job ID as the name.""" - - run_name: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name"] - nullable_fields = ["name", "run_name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py deleted file mode 100644 index a1c2f570..00000000 --- a/src/mistralai/models/wandbintegrationout.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationOutType = Literal["wandb"] - - -class WandbIntegrationOutTypedDict(TypedDict): - project: str - r"""The name of the project that the new run will be created under.""" - type: NotRequired[WandbIntegrationOutType] - name: NotRequired[Nullable[str]] - r"""A display name to set for the run. If not set, will use the job ID as the name.""" - run_name: NotRequired[Nullable[str]] - url: NotRequired[Nullable[str]] - - -class WandbIntegrationOut(BaseModel): - project: str - r"""The name of the project that the new run will be created under.""" - - type: Optional[WandbIntegrationOutType] = "wandb" - - name: OptionalNullable[str] = UNSET - r"""A display name to set for the run. If not set, will use the job ID as the name.""" - - run_name: OptionalNullable[str] = UNSET - - url: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name", "url"] - nullable_fields = ["name", "run_name", "url"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py deleted file mode 100644 index 
70fc5626..00000000 --- a/src/mistralai/models/websearchpremiumtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchPremiumToolType = Literal["web_search_premium"] - - -class WebSearchPremiumToolTypedDict(TypedDict): - type: NotRequired[WebSearchPremiumToolType] - - -class WebSearchPremiumTool(BaseModel): - type: Optional[WebSearchPremiumToolType] = "web_search_premium" diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py deleted file mode 100644 index 3dfd1c53..00000000 --- a/src/mistralai/models/websearchtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchToolType = Literal["web_search"] - - -class WebSearchToolTypedDict(TypedDict): - type: NotRequired[WebSearchToolType] - - -class WebSearchTool(BaseModel): - type: Optional[WebSearchToolType] = "web_search" diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py deleted file mode 100644 index a7b1d04a..00000000 --- a/src/mistralai/ocr.py +++ /dev/null @@ -1,278 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from typing import Any, List, Mapping, Optional, Union - - -class Ocr(BaseSDK): - r"""OCR API""" - - def process( - self, - *, - model: Nullable[str], - document: Union[models.Document, models.DocumentTypedDict], - id: Optional[str] = None, - pages: OptionalNullable[List[int]] = UNSET, - include_image_base64: OptionalNullable[bool] = UNSET, - image_limit: OptionalNullable[int] = UNSET, - image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.OCRResponse: - r"""OCR - - :param model: - :param document: Document to run OCR on - :param id: - :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 - :param include_image_base64: Include image URLs in response - :param image_limit: Max images to extract - :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.OCRRequest( - model=model, - id=id, - document=utils.get_pydantic_model(document, models.Document), - pages=pages, - include_image_base64=include_image_base64, - image_limit=image_limit, - image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), - ) - - req = self._build_request( - method="POST", - path="/v1/ocr", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.OCRRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", 
- operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.OCRResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) - - async def process_async( - self, - *, - model: Nullable[str], - document: Union[models.Document, models.DocumentTypedDict], - id: Optional[str] = None, - pages: OptionalNullable[List[int]] = UNSET, - include_image_base64: OptionalNullable[bool] = UNSET, - image_limit: OptionalNullable[int] = UNSET, - image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: 
Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.OCRResponse: - r"""OCR - - :param model: - :param document: Document to run OCR on - :param id: - :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 - :param include_image_base64: Include image URLs in response - :param image_limit: Max images to extract - :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.OCRRequest( - model=model, - id=id, - document=utils.get_pydantic_model(document, models.Document), - pages=pages, - include_image_base64=include_image_base64, - image_limit=image_limit, - image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/ocr", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.OCRRequest - ), - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - 
return utils.unmarshal_json(http_res.text, models.OCRResponse) - if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py deleted file mode 100644 index 23d31cc7..00000000 --- a/src/mistralai/sdk.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig -import httpx -import importlib -from mistralai import models, utils -from mistralai._hooks import SDKHooks -from mistralai.types import OptionalNullable, UNSET -from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast -import weakref - -if TYPE_CHECKING: - from mistralai.agents import Agents - from mistralai.audio import Audio - from mistralai.batch import Batch - from mistralai.beta import Beta - from mistralai.chat import Chat - from mistralai.classifiers import Classifiers - from mistralai.embeddings import Embeddings - from mistralai.files import Files - from mistralai.fim import Fim - from mistralai.fine_tuning import FineTuning - from mistralai.models_ import Models - from mistralai.ocr import Ocr - - -class Mistral(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - models: "Models" - r"""Model Management API""" - beta: "Beta" - files: "Files" - r"""Files API""" - fine_tuning: "FineTuning" - batch: "Batch" - chat: "Chat" - r"""Chat Completion API.""" - fim: "Fim" - r"""Fill-in-the-middle API.""" - agents: "Agents" - r"""Agents API.""" - embeddings: "Embeddings" - r"""Embeddings API.""" - classifiers: "Classifiers" - r"""Classifiers API.""" - ocr: "Ocr" - r"""OCR API""" - audio: "Audio" - _sub_sdk_map = { - "models": ("mistralai.models_", "Models"), - "beta": ("mistralai.beta", "Beta"), - "files": ("mistralai.files", "Files"), - "fine_tuning": ("mistralai.fine_tuning", "FineTuning"), - "batch": ("mistralai.batch", "Batch"), - "chat": ("mistralai.chat", "Chat"), - "fim": ("mistralai.fim", "Fim"), - "agents": ("mistralai.agents", "Agents"), - "embeddings": ("mistralai.embeddings", "Embeddings"), - "classifiers": ("mistralai.classifiers", "Classifiers"), - "ocr": ("mistralai.ocr", "Ocr"), - "audio": ("mistralai.audio", "Audio"), - } - - def __init__( - self, - api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, - server: Optional[str] = None, - server_url: Optional[str] = None, - url_params: Optional[Dict[str, str]] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. 
- - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - client_supplied = True - if client is None: - client = httpx.Client() - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." - - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol." 
- - security: Any = None - if callable(api_key): - # pylint: disable=unnecessary-lambda-assignment - security = lambda: models.Security(api_key=api_key()) - else: - security = models.Security(api_key=api_key) - - if server_url is not None: - if url_params is not None: - server_url = utils.template_url(server_url, url_params) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=server_url, - server=server, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - ) - - hooks = SDKHooks() - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - def __getattr__(self, name: str): - if name in self._sub_sdk_map: - module_path, class_name = self._sub_sdk_map[name] - try: - module = importlib.import_module(module_path) - klass = getattr(module, class_name) - instance = klass(self.sdk_configuration) - setattr(self, name, instance) - return instance - except ImportError as e: - raise AttributeError( - f"Failed to import module {module_path} for attribute {name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" - ) from e - - raise AttributeError( - f"'{type(self).__name__}' object has no attribute '{name}'" - ) - - def __dir__(self): - 
default_attrs = list(super().__dir__()) - lazy_attrs = list(self._sub_sdk_map.keys()) - return sorted(list(set(default_attrs + lazy_attrs))) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py deleted file mode 100644 index 7e77925d..00000000 --- a/src/mistralai/sdkconfiguration.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) -from .httpclient import AsyncHttpClient, HttpClient -from .utils import Logger, RetryConfig, remove_suffix -from dataclasses import dataclass -from mistralai import models -from mistralai.types import OptionalNullable, UNSET -from pydantic import Field -from typing import Callable, Dict, Optional, Tuple, Union - - -SERVER_EU = "eu" -r"""EU Production server""" -SERVERS = { - SERVER_EU: "https://api.mistral.ai", -} -"""Contains the list of servers available to the SDK""" - - -@dataclass -class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool - debug_logger: Logger - security: Optional[Union[models.Security, Callable[[], models.Security]]] = None - server_url: Optional[str] = "" - server: Optional[str] = "" - language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ - retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) - timeout_ms: Optional[int] = None - - def get_server_details(self) -> Tuple[str, Dict[str, str]]: - if self.server_url is not None and self.server_url: - return remove_suffix(self.server_url, "/"), {} - if not self.server: - self.server = SERVER_EU - - if self.server not in SERVERS: - raise ValueError(f'Invalid server "{self.server}"') - - return SERVERS[self.server], {} diff --git a/src/mistralai/types/__init__.py b/src/mistralai/types/__init__.py deleted file mode 100644 index fc76fe0c..00000000 --- a/src/mistralai/types/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basemodel import ( - BaseModel, - Nullable, - OptionalNullable, - UnrecognizedInt, - UnrecognizedStr, - UNSET, - UNSET_SENTINEL, -) - -__all__ = [ - "BaseModel", - "Nullable", - "OptionalNullable", - "UnrecognizedInt", - "UnrecognizedStr", - "UNSET", - "UNSET_SENTINEL", -] diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py deleted file mode 100644 index 231c2e37..00000000 --- a/src/mistralai/types/basemodel.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from pydantic import ConfigDict, model_serializer -from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union -from typing_extensions import TypeAliasType, TypeAlias - - -class BaseModel(PydanticBaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() - ) - - -class Unset(BaseModel): - @model_serializer(mode="plain") - def serialize_model(self): - return UNSET_SENTINEL - - def __bool__(self) -> Literal[False]: - return False - - -UNSET = Unset() -UNSET_SENTINEL = "~?~unset~?~sentinel~?~" - - -T = TypeVar("T") -if TYPE_CHECKING: - Nullable: TypeAlias = Union[T, None] - OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] -else: - Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) - OptionalNullable = TypeAliasType( - "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) - ) - -UnrecognizedInt: TypeAlias = int -UnrecognizedStr: TypeAlias = str diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py deleted file mode 100644 index 3d078198..00000000 --- a/src/mistralai/utils/__init__.py +++ /dev/null @@ -1,187 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from typing import TYPE_CHECKING -from importlib import import_module - -if TYPE_CHECKING: - from .annotations import get_discriminator - from .datetimes import parse_datetime - from .enums import OpenEnumMeta - from .headers import get_headers, get_response_headers - from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, - ) - from .queryparams import get_query_params - from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig - from .requestbodies import serialize_request_body, SerializedRequestBody - from .security import get_security, get_security_from_env - - from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, - ) - from .url import generate_url, template_url, remove_suffix - from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, - ) - from .logger import Logger, get_body_content, get_default_logger - -__all__ = [ - "BackoffStrategy", - "FieldMetadata", - "find_metadata", - "FormMetadata", - "generate_url", - "get_body_content", - "get_default_logger", - "get_discriminator", - "parse_datetime", - "get_global_from_env", - "get_headers", - "get_pydantic_model", - "get_query_params", - "get_response_headers", - "get_security", - "get_security_from_env", - "HeaderMetadata", - "Logger", - "marshal_json", - "match_content_type", - "match_status_codes", - "match_response", - "MultipartFormMetadata", - "OpenEnumMeta", - "PathParamMetadata", - "QueryParamMetadata", - "remove_suffix", - "Retries", - "retry", - "retry_async", - "RetryConfig", - 
"RequestMetadata", - "SecurityMetadata", - "serialize_decimal", - "serialize_float", - "serialize_int", - "serialize_request_body", - "SerializedRequestBody", - "stream_to_text", - "stream_to_text_async", - "stream_to_bytes", - "stream_to_bytes_async", - "template_url", - "unmarshal", - "unmarshal_json", - "validate_decimal", - "validate_const", - "validate_float", - "validate_int", - "validate_open_enum", - "cast_partial", -] - -_dynamic_imports: dict[str, str] = { - "BackoffStrategy": ".retries", - "FieldMetadata": ".metadata", - "find_metadata": ".metadata", - "FormMetadata": ".metadata", - "generate_url": ".url", - "get_body_content": ".logger", - "get_default_logger": ".logger", - "get_discriminator": ".annotations", - "parse_datetime": ".datetimes", - "get_global_from_env": ".values", - "get_headers": ".headers", - "get_pydantic_model": ".serializers", - "get_query_params": ".queryparams", - "get_response_headers": ".headers", - "get_security": ".security", - "get_security_from_env": ".security", - "HeaderMetadata": ".metadata", - "Logger": ".logger", - "marshal_json": ".serializers", - "match_content_type": ".values", - "match_status_codes": ".values", - "match_response": ".values", - "MultipartFormMetadata": ".metadata", - "OpenEnumMeta": ".enums", - "PathParamMetadata": ".metadata", - "QueryParamMetadata": ".metadata", - "remove_suffix": ".url", - "Retries": ".retries", - "retry": ".retries", - "retry_async": ".retries", - "RetryConfig": ".retries", - "RequestMetadata": ".metadata", - "SecurityMetadata": ".metadata", - "serialize_decimal": ".serializers", - "serialize_float": ".serializers", - "serialize_int": ".serializers", - "serialize_request_body": ".requestbodies", - "SerializedRequestBody": ".requestbodies", - "stream_to_text": ".serializers", - "stream_to_text_async": ".serializers", - "stream_to_bytes": ".serializers", - "stream_to_bytes_async": ".serializers", - "template_url": ".url", - "unmarshal": ".serializers", - "unmarshal_json": 
".serializers", - "validate_decimal": ".serializers", - "validate_const": ".serializers", - "validate_float": ".serializers", - "validate_int": ".serializers", - "validate_open_enum": ".serializers", - "cast_partial": ".values", -} - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = import_module(module_name, __package__) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e - - -def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py deleted file mode 100644 index 387874ed..00000000 --- a/src/mistralai/utils/annotations.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from enum import Enum -from typing import Any, Optional - -def get_discriminator(model: Any, fieldname: str, key: str) -> str: - """ - Recursively search for the discriminator attribute in a model. - - Args: - model (Any): The model to search within. - fieldname (str): The name of the field to search for. - key (str): The key to search for in dictionaries. - - Returns: - str: The name of the discriminator attribute. - - Raises: - ValueError: If the discriminator attribute is not found. 
- """ - upper_fieldname = fieldname.upper() - - def get_field_discriminator(field: Any) -> Optional[str]: - """Search for the discriminator attribute in a given field.""" - - if isinstance(field, dict): - if key in field: - return f'{field[key]}' - - if hasattr(field, fieldname): - attr = getattr(field, fieldname) - if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' - - if hasattr(field, upper_fieldname): - attr = getattr(field, upper_fieldname) - if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' - - return None - - - if isinstance(model, list): - for field in model: - discriminator = get_field_discriminator(field) - if discriminator is not None: - return discriminator - - discriminator = get_field_discriminator(model) - if discriminator is not None: - return discriminator - - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py deleted file mode 100644 index c3bc13cf..00000000 --- a/src/mistralai/utils/enums.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import enum -import sys - -class OpenEnumMeta(enum.EnumMeta): - # The __call__ method `boundary` kwarg was added in 3.11 and must be present - # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 - # pylint: disable=unexpected-keyword-arg - # The __call__ method `values` varg must be named for pyright. - # pylint: disable=keyword-arg-before-vararg - - if sys.version_info >= (3, 11): - def __call__( - cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. 
- # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - *values, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - *values, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - except ValueError: - return value - else: - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py deleted file mode 100644 index 74a63f75..00000000 --- a/src/mistralai/utils/eventstreaming.py +++ /dev/null @@ -1,238 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import re -import json -from typing import ( - Callable, - Generic, - TypeVar, - Optional, - Generator, - AsyncGenerator, - Tuple, -) -import httpx - -T = TypeVar("T") - - -class EventStream(Generic[T]): - response: httpx.Response - generator: Generator[T, None, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - ): - self.response = response - self.generator = stream_events(response, decoder, sentinel) - - def __iter__(self): - return self - - def __next__(self): - return next(self.generator) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.response.close() - - -class EventStreamAsync(Generic[T]): - response: httpx.Response - generator: AsyncGenerator[T, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - ): - self.response = response - self.generator = stream_events_async(response, decoder, sentinel) - - def __aiter__(self): - return self - - async def __anext__(self): - return await self.generator.__anext__() - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.response.aclose() - - -class ServerEvent: - id: Optional[str] = None - event: Optional[str] = None - data: Optional[str] = None - retry: Optional[int] = None - - -MESSAGE_BOUNDARIES = [ - b"\r\n\r\n", - b"\n\n", - b"\r\r", -] - - -async def stream_events_async( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> AsyncGenerator[T, None]: - buffer = bytearray() - position = 0 - discard = False - async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def stream_events( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> Generator[T, None, None]: - buffer = bytearray() - position = 0 - discard = False - for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: - block = raw.decode() - lines = re.split(r"\r?\n|\r", block) - publish = False - event = ServerEvent() - data = "" - for line in lines: - if not line: - continue - - delim = line.find(":") - if delim <= 0: - continue - - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] - - if field == "event": - event.event = value - publish = True - elif field == "data": - data += value + "\n" - publish = True - elif field == "id": - event.id = value - publish = True - elif field == "retry": - event.retry = int(value) if value.isdigit() else None - publish = True - - if sentinel and data == f"{sentinel}\n": - return None, True - - if data: - data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass - - out = None - if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False - - -def _peek_sequence(position: int, buffer: bytearray, sequence: 
bytes): - if len(sequence) > (len(buffer) - position): - return None - - for i, seq in enumerate(sequence): - if buffer[position + i] != seq: - return None - - return sequence diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py deleted file mode 100644 index e873495f..00000000 --- a/src/mistralai/utils/forms.py +++ /dev/null @@ -1,223 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .serializers import marshal_json - -from .metadata import ( - FormMetadata, - MultipartFormMetadata, - find_field_metadata, -) -from .values import _is_set, _val_to_string - - -def _populate_form( - field_name: str, - explode: bool, - obj: Any, - delimiter: str, - form: Dict[str, List[str]], -): - if not _is_set(obj): - return form - - if isinstance(obj, BaseModel): - items = [] - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_field_name = obj_field.alias if obj_field.alias is not None else name - if obj_field_name == "": - continue - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - form[obj_field_name] = [_val_to_string(val)] - else: - items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, Dict): - items = [] - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - form[key] = [_val_to_string(value)] - else: - items.append(f"{key}{delimiter}{_val_to_string(value)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, List): - items = [] - - for value in obj: - if not _is_set(value): - continue - - if explode: - if not field_name in form: - form[field_name] = [] - 
form[field_name].append(_val_to_string(value)) - else: - items.append(_val_to_string(value)) - - if len(items) > 0: - form[field_name] = [delimiter.join([str(item) for item in items])] - else: - form[field_name] = [_val_to_string(obj)] - - return form - - -def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: - """Extract file name, content, and content type from a file object.""" - file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] - - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue - - if file_metadata.content: - content = getattr(file_obj, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(file_obj, file_field_name, None) - else: - file_name = getattr(file_obj, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - return file_name, content, content_type - - -def serialize_multipart_form( - media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: - form: Dict[str, Any] = {} - files: List[Tuple[str, Any]] = [] - - if not isinstance(request, BaseModel): - raise TypeError("invalid request body type") - - request_fields: Dict[str, FieldInfo] = request.__class__.model_fields - request_field_types = get_type_hints(request.__class__) - - for name in request_fields: - field = request_fields[name] - - val = getattr(request, name) - if not _is_set(val): - continue - - field_metadata = find_field_metadata(field, MultipartFormMetadata) - if not field_metadata: - continue - - f_name = field.alias if field.alias else name - - if field_metadata.file: - if isinstance(val, List): - # Handle array of files - for file_obj in val: - if not _is_set(file_obj): - continue - - file_name, content, content_type = 
_extract_file_properties(file_obj) - - if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) - else: - files.append((f_name + "[]", (file_name, content))) - else: - # Handle single file - file_name, content, content_type = _extract_file_properties(val) - - if content_type is not None: - files.append((f_name, (file_name, content, content_type))) - else: - files.append((f_name, (file_name, content))) - elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) - else: - if isinstance(val, List): - values = [] - - for value in val: - if not _is_set(value): - continue - values.append(_val_to_string(value)) - - form[f_name + "[]"] = values - else: - form[f_name] = _val_to_string(val) - return media_type, form, files - - -def serialize_form_data(data: Any) -> Dict[str, Any]: - form: Dict[str, List[str]] = {} - - if isinstance(data, BaseModel): - data_fields: Dict[str, FieldInfo] = data.__class__.model_fields - data_field_types = get_type_hints(data.__class__) - for name in data_fields: - field = data_fields[name] - - val = getattr(data, name) - if not _is_set(val): - continue - - metadata = find_field_metadata(field, FormMetadata) - if metadata is None: - continue - - f_name = field.alias if field.alias is not None else name - - if metadata.json: - form[f_name] = [marshal_json(val, data_field_types[name])] - else: - if metadata.style == "form": - _populate_form( - f_name, - metadata.explode, - val, - ",", - form, - ) - else: - raise ValueError(f"Invalid form style for field {name}") - elif isinstance(data, Dict): - for key, value in data.items(): - if _is_set(value): - form[key] = [_val_to_string(value)] - else: - raise TypeError(f"Invalid request body type {type(data)} for form data") - - return form diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py deleted file mode 100644 index 37a6e7f9..00000000 --- 
a/src/mistralai/utils/queryparams.py +++ /dev/null @@ -1,205 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, -) - -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - QueryParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) -from .forms import _populate_form - - -def get_query_params( - query_params: Any, - gbls: Optional[Any] = None, -) -> Dict[str, List[str]]: - params: Dict[str, List[str]] = {} - - globals_already_populated = _populate_query_params(query_params, gbls, params, []) - if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) - - return params - - -def _populate_query_params( - query_params: Any, - gbls: Any, - query_param_values: Dict[str, List[str]], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(query_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields - param_field_types = get_type_hints(query_params.__class__) - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - - metadata = find_field_metadata(field, QueryParamMetadata) - if not metadata: - continue - - value = getattr(query_params, name) if _is_set(query_params) else None - - value, global_found = _populate_from_globals( - name, value, QueryParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - f_name = field.alias if field.alias is not None else name - serialization = metadata.serialization - if serialization is not None: - serialized_parms = _get_serialized_params( - metadata, f_name, value, param_field_types[name] - ) - for key, value in 
serialized_parms.items(): - if key in query_param_values: - query_param_values[key].extend(value) - else: - query_param_values[key] = [value] - else: - style = metadata.style - if style == "deepObject": - _populate_deep_object_query_params(f_name, value, query_param_values) - elif style == "form": - _populate_delimited_query_params( - metadata, f_name, value, ",", query_param_values - ) - elif style == "pipeDelimited": - _populate_delimited_query_params( - metadata, f_name, value, "|", query_param_values - ) - else: - raise NotImplementedError( - f"query param style {style} not yet supported" - ) - - return globals_already_populated - - -def _populate_deep_object_query_params( - field_name: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj): - return - - if isinstance(obj, BaseModel): - _populate_deep_object_query_params_basemodel(field_name, obj, params) - elif isinstance(obj, Dict): - _populate_deep_object_query_params_dict(field_name, obj, params) - - -def _populate_deep_object_query_params_basemodel( - prior_params_key: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj) or not isinstance(obj, BaseModel): - return - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - - f_name = obj_field.alias if obj_field.alias is not None else name - - params_key = f"{prior_params_key}[{f_name}]" - - obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if not _is_set(obj_param_metadata): - continue - - obj_val = getattr(obj, name) - if not _is_set(obj_val): - continue - - if isinstance(obj_val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, obj_val, params) - elif isinstance(obj_val, Dict): - _populate_deep_object_query_params_dict(params_key, obj_val, params) - elif isinstance(obj_val, List): - _populate_deep_object_query_params_list(params_key, obj_val, params) - else: - params[params_key] = 
[_val_to_string(obj_val)] - - -def _populate_deep_object_query_params_dict( - prior_params_key: str, - value: Dict, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for key, val in value.items(): - if not _is_set(val): - continue - - params_key = f"{prior_params_key}[{key}]" - - if isinstance(val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, val, params) - elif isinstance(val, Dict): - _populate_deep_object_query_params_dict(params_key, val, params) - elif isinstance(val, List): - _populate_deep_object_query_params_list(params_key, val, params) - else: - params[params_key] = [_val_to_string(val)] - - -def _populate_deep_object_query_params_list( - params_key: str, - value: List, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for val in value: - if not _is_set(val): - continue - - if params.get(params_key) is None: - params[params_key] = [] - - params[params_key].append(_val_to_string(val)) - - -def _populate_delimited_query_params( - metadata: QueryParamMetadata, - field_name: str, - obj: Any, - delimiter: str, - query_param_values: Dict[str, List[str]], -): - _populate_form( - field_name, - metadata.explode, - obj, - delimiter, - query_param_values, - ) diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py deleted file mode 100644 index d5240dd5..00000000 --- a/src/mistralai/utils/requestbodies.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import io -from dataclasses import dataclass -import re -from typing import ( - Any, - Optional, -) - -from .forms import serialize_form_data, serialize_multipart_form - -from .serializers import marshal_json - -SERIALIZATION_METHOD_TO_CONTENT_TYPE = { - "json": "application/json", - "form": "application/x-www-form-urlencoded", - "multipart": "multipart/form-data", - "raw": "application/octet-stream", - "string": "text/plain", -} - - -@dataclass -class SerializedRequestBody: - media_type: Optional[str] = None - content: Optional[Any] = None - data: Optional[Any] = None - files: Optional[Any] = None - - -def serialize_request_body( - request_body: Any, - nullable: bool, - optional: bool, - serialization_method: str, - request_body_type, -) -> Optional[SerializedRequestBody]: - if request_body is None: - if not nullable and optional: - return None - - media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] - - serialized_request_body = SerializedRequestBody(media_type) - - if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: - serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"multipart\/.*", media_type) is not None: - ( - serialized_request_body.media_type, - serialized_request_body.data, - serialized_request_body.files, - ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: - serialized_request_body.data = serialize_form_data(request_body) - elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): - serialized_request_body.content = request_body - elif isinstance(request_body, str): - serialized_request_body.content = request_body - else: - raise TypeError( - f"invalid request body type {type(request_body)} for mediaType {media_type}" - ) - - return serialized_request_body diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py deleted 
file mode 100644 index 4d608671..00000000 --- a/src/mistralai/utils/retries.py +++ /dev/null @@ -1,217 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import asyncio -import random -import time -from typing import List - -import httpx - - -class BackoffStrategy: - initial_interval: int - max_interval: int - exponent: float - max_elapsed_time: int - - def __init__( - self, - initial_interval: int, - max_interval: int, - exponent: float, - max_elapsed_time: int, - ): - self.initial_interval = initial_interval - self.max_interval = max_interval - self.exponent = exponent - self.max_elapsed_time = max_elapsed_time - - -class RetryConfig: - strategy: str - backoff: BackoffStrategy - retry_connection_errors: bool - - def __init__( - self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool - ): - self.strategy = strategy - self.backoff = backoff - self.retry_connection_errors = retry_connection_errors - - -class Retries: - config: RetryConfig - status_codes: List[str] - - def __init__(self, config: RetryConfig, status_codes: List[str]): - self.config = config - self.status_codes = status_codes - - -class TemporaryError(Exception): - response: httpx.Response - - def __init__(self, response: httpx.Response): - self.response = response - - -class PermanentError(Exception): - inner: Exception - - def __init__(self, inner: Exception): - self.inner = inner - - -def retry(func, retries: Retries): - if retries.config.strategy == "backoff": - - def do_request() -> httpx.Response: - res: httpx.Response - try: - res = func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if 
retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return retry_with_backoff( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return func() - - -async def retry_async(func, retries: Retries): - if retries.config.strategy == "backoff": - - async def do_request() -> httpx.Response: - res: httpx.Response - try: - res = await func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return await retry_with_backoff_async( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return await func() - - -def retry_with_backoff( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return 
func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) - retries += 1 - - -async def retry_with_backoff_async( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return await func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) - await asyncio.sleep(sleep) - retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py deleted file mode 100644 index 3b8526bf..00000000 --- a/src/mistralai/utils/security.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import base64 - -from typing import ( - Any, - Dict, - List, - Optional, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - SecurityMetadata, - find_field_metadata, -) -import os - - -def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: - headers: Dict[str, str] = {} - query_params: Dict[str, List[str]] = {} - - if security is None: - return headers, query_params - - if not isinstance(security, BaseModel): - raise TypeError("security must be a pydantic model") - - sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields - for name in sec_fields: - sec_field = sec_fields[name] - - value = getattr(security, name) - if value is None: - continue - - metadata = find_field_metadata(sec_field, SecurityMetadata) - if metadata is None: - continue - if metadata.option: - _parse_security_option(headers, query_params, value) - return headers, query_params - if metadata.scheme: - # Special case for basic auth or custom auth which could be a flattened model - if metadata.sub_type in ["basic", "custom"] and not isinstance( - value, BaseModel - ): - _parse_security_scheme(headers, query_params, metadata, name, security) - else: - _parse_security_scheme(headers, query_params, metadata, name, value) - - return headers, query_params - - -def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: - if security is not None: - return security - - if not issubclass(security_class, BaseModel): - raise TypeError("security_class must be a pydantic model class") - - security_dict: Any = {} - - if os.getenv("MISTRAL_API_KEY"): - security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") - - return security_class(**security_dict) if security_dict else None - - -def _parse_security_option( - headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any -): - if not isinstance(option, BaseModel): - raise TypeError("security option must be a 
pydantic model") - - opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields - for name in opt_fields: - opt_field = opt_fields[name] - - metadata = find_field_metadata(opt_field, SecurityMetadata) - if metadata is None or not metadata.scheme: - continue - _parse_security_scheme( - headers, query_params, metadata, name, getattr(option, name) - ) - - -def _parse_security_scheme( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - field_name: str, - scheme: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - if isinstance(scheme, BaseModel): - if scheme_type == "http": - if sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return - if sub_type == "custom": - return - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - value = getattr(scheme, name) - - _parse_security_scheme_value( - headers, query_params, scheme_metadata, metadata, name, value - ) - else: - _parse_security_scheme_value( - headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme - ) - - -def _parse_security_scheme_value( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - security_metadata: SecurityMetadata, - field_name: str, - value: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - header_name = security_metadata.get_field_name(field_name) - - if scheme_type == "apiKey": - if sub_type == "header": - headers[header_name] = value - elif sub_type == "query": - query_params[header_name] = [value] - else: - raise ValueError("sub type {sub_type} not supported") - elif scheme_type == "openIdConnect": - headers[header_name] = _apply_bearer(value) - elif 
scheme_type == "oauth2": - if sub_type != "client_credentials": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "http": - if sub_type == "bearer": - headers[header_name] = _apply_bearer(value) - elif sub_type == "custom": - return - else: - raise ValueError("sub type {sub_type} not supported") - else: - raise ValueError("scheme type {scheme_type} not supported") - - -def _apply_bearer(token: str) -> str: - return token.lower().startswith("bearer ") and token or f"Bearer {token}" - - -def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): - username = "" - password = "" - - if not isinstance(scheme, BaseModel): - raise TypeError("basic auth scheme must be a pydantic model") - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - field_name = metadata.field_name - value = getattr(scheme, name) - - if field_name == "username": - username = value - if field_name == "password": - password = value - - data = f"{username}:{password}".encode() - headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py deleted file mode 100644 index 76e44d71..00000000 --- a/src/mistralai/utils/serializers.py +++ /dev/null @@ -1,248 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from decimal import Decimal -import functools -import json -import typing -from typing import Any, Dict, List, Tuple, Union, get_args -import typing_extensions -from typing_extensions import get_origin - -import httpx -from pydantic import ConfigDict, create_model -from pydantic_core import from_json - -from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset - - -def serialize_decimal(as_str: bool): - def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: - return None - if isinstance(d, Unset): - return d - - if not isinstance(d, Decimal): - raise ValueError("Expected Decimal object") - - return str(d) if as_str else float(d) - - return serialize - - -def validate_decimal(d): - if d is None: - return None - - if isinstance(d, (Decimal, Unset)): - return d - - if not isinstance(d, (str, int, float)): - raise ValueError("Expected string, int or float") - - return Decimal(str(d)) - - -def serialize_float(as_str: bool): - def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: - return None - if isinstance(f, Unset): - return f - - if not isinstance(f, float): - raise ValueError("Expected float") - - return str(f) if as_str else f - - return serialize - - -def validate_float(f): - if f is None: - return None - - if isinstance(f, (float, Unset)): - return f - - if not isinstance(f, str): - raise ValueError("Expected string") - - return float(f) - - -def serialize_int(as_str: bool): - def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: - return None - if isinstance(i, Unset): - return i - - if not isinstance(i, int): - raise ValueError("Expected int") - - return str(i) if as_str else i - - return serialize - - -def validate_int(b): - if b is None: - return None - - if isinstance(b, (int, Unset)): - return b - - if 
not isinstance(b, str): - raise ValueError("Expected string") - - return int(b) - - -def validate_open_enum(is_int: bool): - def validate(e): - if e is None: - return None - - if isinstance(e, Unset): - return e - - if is_int: - if not isinstance(e, int): - raise ValueError("Expected int") - else: - if not isinstance(e, str): - raise ValueError("Expected string") - - return e - - return validate - - -def validate_const(v): - def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: - return None - - if v != c: - raise ValueError(f"Expected {v}") - - return c - - return validate - - -def unmarshal_json(raw, typ: Any) -> Any: - return unmarshal(from_json(raw), typ) - - -def unmarshal(val, typ: Any) -> Any: - unmarshaller = create_model( - "Unmarshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = unmarshaller(body=val) - - # pyright: ignore[reportAttributeAccessIssue] - return m.body # type: ignore - - -def marshal_json(val, typ): - if is_nullable(typ) and val is None: - return "null" - - marshaller = create_model( - "Marshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = marshaller(body=val) - - d = m.model_dump(by_alias=True, mode="json", exclude_none=True) - - if len(d) == 0: - return "" - - return json.dumps(d[next(iter(d))], separators=(",", ":")) - - -def is_nullable(field): - origin = get_origin(field) - if origin is Nullable or origin is OptionalNullable: - return True - - if not origin is Union or type(None) not in get_args(field): - return False - - for arg in get_args(field): - if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: - return True - - return False - - -def is_union(obj: object) -> bool: - """ - Returns True if the given object is a typing.Union or typing_extensions.Union. 
- """ - return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) - - -def stream_to_text(stream: httpx.Response) -> str: - return "".join(stream.iter_text()) - - -async def stream_to_text_async(stream: httpx.Response) -> str: - return "".join([chunk async for chunk in stream.aiter_text()]) - - -def stream_to_bytes(stream: httpx.Response) -> bytes: - return stream.content - - -async def stream_to_bytes_async(stream: httpx.Response) -> bytes: - return await stream.aread() - - -def get_pydantic_model(data: Any, typ: Any) -> Any: - if not _contains_pydantic_model(data): - return unmarshal(data, typ) - - return data - - -def _contains_pydantic_model(data: Any) -> bool: - if isinstance(data, BaseModel): - return True - if isinstance(data, List): - return any(_contains_pydantic_model(item) for item in data) - if isinstance(data, Dict): - return any(_contains_pydantic_model(value) for value in data.values()) - - return False - - -@functools.cache -def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: - """ - Get typing objects by name from typing and typing_extensions. - Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types - """ - result = tuple( - getattr(module, name) - for module in (typing, typing_extensions) - if hasattr(module, name) - ) - if not result: - raise ValueError( - f"Neither typing nor typing_extensions has an object called {name!r}" - ) - return result - diff --git a/src/mistralai/version.py b/src/mistralai/version.py deleted file mode 100644 index ee8f8def..00000000 --- a/src/mistralai/version.py +++ /dev/null @@ -1,7 +0,0 @@ -from importlib import metadata - -try: - __version__ = metadata.version(__package__) -except metadata.PackageNotFoundError: - # Case where package metadata is not available. 
- __version__ = "" diff --git a/tasks.py b/tasks.py new file mode 100644 index 00000000..8b1bc3f0 --- /dev/null +++ b/tasks.py @@ -0,0 +1,48 @@ +import re +from invoke.context import Context +from invoke.tasks import task +from utils.speakeasy import ( + pin_speakeasy_version, + OpenAPISpecsPinned, + SpeakeasyTargets, + WORKFLOW_PATH, + WORKFLOW_LOCK_PATH, +) + + +@task(iterable=["targets"]) +def update_speakeasy( + ctx: Context, + version: str, + targets: list[SpeakeasyTargets] = [SpeakeasyTargets.ALL], + workflow_path: str = WORKFLOW_PATH, + workflow_lock_path: str = WORKFLOW_LOCK_PATH, + verbose: bool = False, +): + """ + Update the speakeasy version and pin the openapi specs to the current revision. + + Usage: + inv update-speakeasy --version "1.580.2" --targets "all" + inv update-speakeasy --version "1.580.2" --targets "mistralai-azure-sdk" --targets "mistralai-gcp-sdk" --verbose + inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" + inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" --verbose + """ + if not re.match(r'^\d+\.\d+\.\d+$', version): + raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)") + for target in targets: + try: + SpeakeasyTargets(target) + except ValueError: + raise ValueError( + f"Invalid target: {target}. 
Your targets must be one of {SpeakeasyTargets.list()}" + ) + cmd = ( + "speakeasy run" + + " --skip-versioning" + + "".join(f" -t {target}" for target in targets) + + (" --verbose" if verbose else "") + ) + pin_speakeasy_version(workflow_path=workflow_path, version=version) + with OpenAPISpecsPinned(workflow_path, workflow_lock_path): + ctx.run(cmd) diff --git a/tests/test_azure_integration.py b/tests/test_azure_integration.py new file mode 100644 index 00000000..ac4e38a1 --- /dev/null +++ b/tests/test_azure_integration.py @@ -0,0 +1,433 @@ +""" +Integration tests for Azure SDK. + +These tests require credentials and make real API calls. +Skip if AZURE_API_KEY env var is not set. + +Prerequisites: + 1. Azure API key (stored in Bitwarden at "[MaaS] - Azure Foundry API key") + 2. Tailscale connected via gw-0 exit node + +Usage: + AZURE_API_KEY=xxx pytest tests/test_azure_integration.py -v + +Environment variables: + AZURE_API_KEY: API key (required) + AZURE_ENDPOINT: Base URL (default: https://maas-qa-aifoundry.services.ai.azure.com/models) + AZURE_MODEL: Model name (default: maas-qa-ministral-3b) + AZURE_API_VERSION: API version (default: 2024-05-01-preview) + +Note: AZURE_ENDPOINT should be the base URL without path suffixes. +The SDK appends /chat/completions to this URL. The api_version parameter +is automatically injected as a query parameter by the SDK. 
+ +Available models: + Chat: maas-qa-ministral-3b, maas-qa-mistral-large-3, maas-qa-mistral-medium-2505 + OCR: maas-qa-mistral-document-ai-2505, maas-qa-mistral-document-ai-2512 + (OCR uses a separate endpoint, not tested here) +""" +import json +import os + +import pytest + +# Configuration from env vars +AZURE_API_KEY = os.environ.get("AZURE_API_KEY") +AZURE_ENDPOINT = os.environ.get( + "AZURE_ENDPOINT", + "https://maas-qa-aifoundry.services.ai.azure.com/models", +) +AZURE_MODEL = os.environ.get("AZURE_MODEL", "maas-qa-ministral-3b") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +SKIP_REASON = "AZURE_API_KEY env var required" + +pytestmark = pytest.mark.skipif( + not AZURE_API_KEY, + reason=SKIP_REASON +) + +# Shared tool definition for tool-call tests +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather in a city", + "parameters": { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + }, + }, +} + + +@pytest.fixture +def azure_client(): + """Create an Azure client with api_version parameter.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + return MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + + +class TestAzureChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, azure_client): + """Test basic chat completion returns a response.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def 
test_completion_with_system_message(self, azure_client): + """Test chat completion with system + user message.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, azure_client): + """Test chat completion respects max_tokens.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, azure_client): + """Test chat completion accepts temperature parameter.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, azure_client): + """Test chat completion stops at stop sequence.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, azure_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 
'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + + def test_multi_turn_conversation(self, azure_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, azure_client): + """Test that the model returns a tool call when given tools.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, azure_client): + """Test JSON response format returns valid JSON.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + response_format={"type": "json_object"}, + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert 
isinstance(parsed, dict) + + def test_completion_with_n(self, azure_client): + """Test completion with n=2 returns multiple choices.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say a random word."} + ], + n=2, + ) + assert res is not None + assert len(res.choices) == 2 + for choice in res.choices: + assert choice.message.content is not None + + +class TestAzureChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, azure_client): + """Test streaming returns chunks with content.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, azure_client): + """Test streaming respects max_tokens truncation.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, azure_client): + """Test that the last chunk has a finish_reason.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and 
chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, azure_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestAzureChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, azure_client): + """Test async chat completion returns a response.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, azure_client): + """Test async chat completion with system + user message.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, azure_client): + """Test async tool call returns tool_calls.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + 
tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + +class TestAzureChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, azure_client): + """Test async streaming returns chunks with content.""" + stream = await azure_client.chat.stream_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestAzureContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralAzure works as a sync context manager.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + with MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) as client: + res = client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralAzure works as an async context manager.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + async with MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) as client: + res = await client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert 
res.choices[0].message.content is not None diff --git a/tests/test_azure_v2_parity.py b/tests/test_azure_v2_parity.py new file mode 100644 index 00000000..8cd89bf4 --- /dev/null +++ b/tests/test_azure_v2_parity.py @@ -0,0 +1,269 @@ +""" +Parity tests for the Azure v2 SDK. + +Verifies that the regenerated mistralai.azure package exposes +the same public API surface as the v1 mistralai_azure package. +Uses introspection only — no API calls or credentials required. +""" +import inspect + +import pytest + +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.chat import Chat +from mistralai.azure.client.ocr import Ocr +from mistralai.azure.client.types import UNSET + +AZURE_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "ocr": {"process"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("api_key", _EMPTY), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), + ("api_version", "2024-05-01-preview"), +] + +CHAT_COMPLETE_PARAMS = [ + ("messages", _EMPTY), + ("model", "azureai"), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + 
("prompt_mode", UNSET), + ("safe_prompt", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +OCR_PROCESS_PARAMS = [ + ("model", _EMPTY), + ("document", _EMPTY), + ("id", None), + ("pages", UNSET), + ("include_image_base64", UNSET), + ("image_limit", UNSET), + ("image_min_size", UNSET), + ("bbox_annotation_format", UNSET), + ("document_annotation_format", UNSET), + ("document_annotation_prompt", UNSET), + ("table_format", UNSET), + ("extract_header", None), + ("extract_footer", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestAzureSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralAzure.__annotations__ + + def test_sdk_has_ocr(self): + assert "ocr" in MistralAzure.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralAzure.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralAzure, method), f"MistralAzure missing {method}" + + +class TestAzureChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, 
"complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, 
f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_defaults_azureai(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default == "azureai" + + def test_stream_model_defaults_azureai(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default == "azureai" + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestAzureOcr: + def test_has_process(self): + assert hasattr(Ocr, "process") + mark_tested("ocr", "process") + + def test_has_process_async(self): + assert hasattr(Ocr, "process_async") + mark_tested("ocr", "process_async") + + # -- process params -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process) + assert param_name in sig.parameters, f"Ocr.process missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process param {param_name}: expected 
{expected_default!r}, got {actual!r}" + ) + + # -- process_async matches process -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process_async) + assert param_name in sig.parameters, f"Ocr.process_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_process_async_matches_process(self): + sync_params = set(inspect.signature(Ocr.process).parameters) - {"self"} + async_params = set(inspect.signature(Ocr.process_async).parameters) - {"self"} + assert sync_params == async_params + + +class TestAzureCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in AZURE_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_ocr(self): + public = {m for m in dir(Ocr) if not m.startswith("_") and callable(getattr(Ocr, m, None))} + known = {"process", "process_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Ocr methods: {unexpected}" diff --git a/tests/test_gcp_integration.py b/tests/test_gcp_integration.py new file mode 100644 index 00000000..fe24b8b0 --- /dev/null +++ b/tests/test_gcp_integration.py @@ 
-0,0 +1,512 @@ +""" +Integration tests for GCP SDK. + +These tests require GCP credentials and make real API calls. +Skip if GCP_PROJECT_ID env var is not set. + +Prerequisites: + 1. Authenticate with GCP: gcloud auth application-default login + 2. Have "Vertex AI User" role on the project (e.g. model-garden-420509) + +The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + +Available models: + - Chat: mistral-small-2503, mistral-large-2501, ... + - FIM: codestral-2 + See: https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/mistral + +Usage: + GCP_PROJECT_ID=model-garden-420509 pytest tests/test_gcp_integration.py -v + +Environment variables: + GCP_PROJECT_ID: GCP project ID (required, or auto-detected from credentials) + GCP_REGION: Vertex AI region (default: us-central1) + GCP_MODEL: Model name for chat (default: mistral-small-2503) + GCP_FIM_MODEL: Model name for FIM (default: codestral-2) + +""" +import json +import os + +import pytest + +# Configuration from env vars +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") +GCP_FIM_MODEL = os.environ.get("GCP_FIM_MODEL", "codestral-2") + +SKIP_REASON = "GCP_PROJECT_ID env var required" + +pytestmark = pytest.mark.skipif( + not GCP_PROJECT_ID, + reason=SKIP_REASON +) + +# Shared tool definition for tool-call tests +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather in a city", + "parameters": { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + }, + }, +} + + +@pytest.fixture +def gcp_client(): + """Create a GCP client for chat tests. 
+ + The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + """ + from mistralai.gcp.client import MistralGCP + return MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) + + +class TestGCPChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, gcp_client): + """Test basic chat completion returns a response.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_system_message(self, gcp_client): + """Test chat completion with system + user message.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. 
Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, gcp_client): + """Test chat completion respects max_tokens.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, gcp_client): + """Test chat completion accepts temperature parameter.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, gcp_client): + """Test chat completion stops at stop sequence.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, gcp_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + + def 
test_multi_turn_conversation(self, gcp_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, gcp_client): + """Test that the model returns a tool call when given tools.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, gcp_client): + """Test JSON response format returns valid JSON.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + response_format={"type": "json_object"}, + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert isinstance(parsed, dict) + + +class TestGCPChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, gcp_client): + """Test streaming returns chunks with content.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": 
"Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, gcp_client): + """Test streaming respects max_tokens truncation.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, gcp_client): + """Test that the last chunk has a finish_reason.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, gcp_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + 
assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestGCPChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, gcp_client): + """Test async chat completion returns a response.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, gcp_client): + """Test async chat completion with system + user message.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, gcp_client): + """Test async tool call returns tool_calls.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + +class TestGCPChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, gcp_client): + """Test async streaming returns chunks with content.""" + stream = await gcp_client.chat.stream_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and 
chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestGCPContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralGCP works as a sync context manager.""" + from mistralai.gcp.client import MistralGCP + with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralGCP works as an async context manager.""" + from mistralai.gcp.client import MistralGCP + async with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = await client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + +class TestGCPFIM: + """Test FIM (Fill-in-the-middle) completion.""" + + def _make_fim_client(self): + """Create a GCP client configured for FIM model.""" + from mistralai.gcp.client import MistralGCP + return MistralGCP(project_id=GCP_PROJECT_ID, region=GCP_REGION) + + def test_fim_complete(self): + """Test FIM completion returns a response.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + def test_fim_stream(self): + """Test FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = client.fim.stream( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + 
chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 + + def test_fim_with_max_tokens(self): + """Test FIM completion with max_tokens.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def add(a, b):", + suffix=" return result", + max_tokens=10, + timeout_ms=10000, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + @pytest.mark.asyncio + async def test_fim_complete_async(self): + """Test async FIM completion returns a response.""" + client = self._make_fim_client() + res = await client.fim.complete_async( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_fim_stream_async(self): + """Test async FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = await client.fim.stream_async( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + chunks = [] + async for chunk in stream: + chunks.append(chunk) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 diff --git a/tests/test_gcp_v2_parity.py b/tests/test_gcp_v2_parity.py new file mode 100644 index 00000000..0d6471e4 --- /dev/null +++ b/tests/test_gcp_v2_parity.py @@ -0,0 +1,330 @@ +""" +Parity tests for the GCP v2 SDK. 
+ +Verifies that the regenerated mistralai.gcp package exposes +the same public API surface as the v1 mistralai_gcp package. +Uses introspection only — no API calls or credentials required. +""" +import inspect + +import pytest + +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.chat import Chat +from mistralai.gcp.client.fim import Fim +from mistralai.gcp.client.types import UNSET + +GCP_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "fim": {"complete", "stream"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("project_id", None), + ("region", "europe-west4"), + ("access_token", None), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), +] + +CHAT_COMPLETE_PARAMS = [ + ("model", _EMPTY), + ("messages", _EMPTY), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +FIM_COMPLETE_PARAMS = 
[ + ("model", _EMPTY), + ("prompt", _EMPTY), + ("temperature", UNSET), + ("top_p", 1), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("suffix", UNSET), + ("min_tokens", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +FIM_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in FIM_COMPLETE_PARAMS +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestGCPSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralGCP.__annotations__ + + def test_sdk_has_fim(self): + assert "fim" in MistralGCP.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralGCP.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralGCP, method), f"MistralGCP missing {method}" + + +class TestGCPChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + 
@pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): 
+ sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_required(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default is _EMPTY + + def test_stream_model_required(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default is _EMPTY + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestGCPFim: + def test_has_complete(self): + assert hasattr(Fim, "complete") + mark_tested("fim", "complete") + + def test_has_complete_async(self): + assert hasattr(Fim, "complete_async") + mark_tested("fim", "complete_async") + + def test_has_stream(self): + assert hasattr(Fim, "stream") + mark_tested("fim", "stream") + + def test_has_stream_async(self): + assert hasattr(Fim, "stream_async") + mark_tested("fim", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.complete) + assert param_name in sig.parameters, f"Fim.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + 
@pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.stream) + assert param_name in sig.parameters, f"Fim.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.complete_async) + assert param_name in sig.parameters, f"Fim.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.stream_async) + assert param_name in sig.parameters, f"Fim.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Fim.complete).parameters) - {"self"} + async_params = set(inspect.signature(Fim.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Fim.stream).parameters) - {"self"} + async_params = set(inspect.signature(Fim.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def 
test_complete_model_required(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["model"].default is _EMPTY + + def test_stream_model_required(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["model"].default is _EMPTY + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["stream"].default is True + + def test_complete_top_p_defaults_to_1(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["top_p"].default == 1 + + def test_stream_top_p_defaults_to_1(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["top_p"].default == 1 + + +class TestGCPCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in GCP_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_fim(self): + public = {m for m in dir(Fim) if not m.startswith("_") and callable(getattr(Fim, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Fim methods: {unexpected}" diff --git a/tests/test_prepare_readme.py b/tests/test_prepare_readme.py new file mode 100644 index 00000000..ce3e11c9 --- /dev/null +++ 
b/tests/test_prepare_readme.py @@ -0,0 +1,37 @@ +import importlib.util +from pathlib import Path + +SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "prepare_readme.py" +SPEC = importlib.util.spec_from_file_location("prepare_readme", SCRIPT_PATH) +if SPEC is None or SPEC.loader is None: + raise ImportError(f"Unable to load prepare_readme from {SCRIPT_PATH}") +prepare_readme = importlib.util.module_from_spec(SPEC) +SPEC.loader.exec_module(prepare_readme) + + +def test_rewrite_relative_links_keeps_absolute() -> None: + base_url = "https://example.com/blob/main/" + contents = "[Migration](MIGRATION.md)\n[Docs](https://docs.mistral.ai)" + expected = ( + "[Migration](https://example.com/blob/main/MIGRATION.md)\n" + "[Docs](https://docs.mistral.ai)" + ) + assert prepare_readme.rewrite_relative_links(contents, base_url) == expected + + +def test_main_prints_rewritten_readme_with_defaults(tmp_path, capsys) -> None: + original = "[Migration](MIGRATION.md)\n" + base_url = prepare_readme.build_base_url( + prepare_readme.DEFAULT_REPO_URL, + prepare_readme.DEFAULT_BRANCH, + "", + ) + expected = f"[Migration]({base_url}MIGRATION.md)\n" + readme_path = tmp_path / "README.md" + readme_path.write_text(original, encoding="utf-8") + + exit_code = prepare_readme.main(["--readme", str(readme_path)]) + + captured = capsys.readouterr() + assert exit_code == 0 + assert captured.out == expected diff --git a/utils/speakeasy.py b/utils/speakeasy.py new file mode 100755 index 00000000..7c685feb --- /dev/null +++ b/utils/speakeasy.py @@ -0,0 +1,91 @@ +""" +This script: +- pins the OpenAPI specs, +- runs speakeasy to update the SDKs' files, +- and then unpins the OpenAPI specs. + +It is advised to often run this script to avoid getting unrelated changes (due to updates) when modifying the OpenAPI specs. 
+""" + +import yaml +from io import TextIOWrapper +import copy +import subprocess +from enum import Enum + +WORKFLOW_PATH = ".speakeasy/workflow.yaml" +WORKFLOW_LOCK_PATH = ".speakeasy/workflow.lock" + + +def set_location_rev(yaml_content: dict, source_name: str, new_rev: str) -> None: + registry = yaml_content["sources"][source_name]["inputs"][0]["location"].split(":")[0] + yaml_content["sources"][source_name]["inputs"][0]["location"] = f"{registry}:{new_rev}" + + +def write_yaml(yaml_content: dict, file: TextIOWrapper) -> None: + return yaml.dump( + yaml_content, file, default_flow_style=False, sort_keys=False, indent=4 + ) + +def pin_speakeasy_version(workflow_path: str, version: str): + with open(workflow_path, "r") as file: + workflow_yaml = yaml.safe_load(file) + workflow_yaml["speakeasyVersion"] = version + with open(workflow_path, "w") as file: + write_yaml(workflow_yaml, file) + +class OpenAPISpecsPinned: + def __init__(self, workflow_path: str, workflow_lock_path: str): + self.workflow_path = workflow_path + self.workflow_lock_path = workflow_lock_path + with open(workflow_path, "r") as file: + self.workflow_yaml = yaml.safe_load(file) + + def __enter__(self): + print("OpenAPI specs pinned to current revision") + self.pin_to_current_rev() + + def __exit__(self, exc_type, exc_value, traceback): + self.unpin() + print("OpenAPI specs unpinned") + + def pin_to_current_rev(self): + yaml_copy = copy.deepcopy(self.workflow_yaml) + # Getting the current revisions of the OpenAPI specs + with open(self.workflow_lock_path, "r") as lock_file: + yaml_lock = yaml.safe_load(lock_file) + rev_azure = yaml_lock["sources"]["mistral-azure-source"]["sourceRevisionDigest"] + rev_google_cloud = yaml_lock["sources"]["mistral-google-cloud-source"]["sourceRevisionDigest"] + rev_mistralai = yaml_lock["sources"]["mistral-openapi"]["sourceRevisionDigest"] + + # Pinning the OpenAPI specs to the current revisions + with open(self.workflow_path, "w") as file: + 
set_location_rev(yaml_copy, "mistral-azure-source", rev_azure) + set_location_rev(yaml_copy, "mistral-google-cloud-source", rev_google_cloud) + set_location_rev(yaml_copy, "mistral-openapi", rev_mistralai) + write_yaml(yaml_content=yaml_copy, file=file) + + + def unpin(self): + with open(self.workflow_path, "w") as file: + write_yaml(yaml_content=self.workflow_yaml, file=file) + +class SpeakeasyTargets(str, Enum): + """ + The list of targets defined in the .speakeasy/workflow.yaml[.targets] section. + This can also be listed running `speakeasy list targets` in the root of the project. + """ + ALL = "all" + MISTRALAI_SDK = "mistralai-sdk" + MISTRALAI_AZURE_SDK = "mistralai-azure-sdk" + MISTRALAI_GCP_SDK = "mistralai-gcp-sdk" + + @classmethod + def list(cls): + return list(map(lambda c: c.value, cls)) + + +if __name__ == "__main__": + pin_speakeasy_version(workflow_path=WORKFLOW_PATH, version="1.580.2") + with OpenAPISpecsPinned(WORKFLOW_PATH, WORKFLOW_LOCK_PATH): + subprocess.run(["speakeasy", "run", "-t", "mistralai-sdk", "--skip-versioning", "--verbose"]) diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..1a37a7d6 --- /dev/null +++ b/uv.lock @@ -0,0 +1,1646 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "authlib" +version = "1.6.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/9b/b1661026ff24bc641b76b78c5222d614776b0c085bcfdac9bd15a1cb4b35/authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e", size = 164894, upload-time = "2025-12-12T08:01:41.464Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/51/321e821856452f7386c4e9df866f196720b1ad0c5ea1623ea7399969ae3b/authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd", size = 244005, upload-time = 
"2025-12-12T08:01:40.209Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, + { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, + { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, + { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, + { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, + { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = 
"sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = 
"2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url 
= "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = 
"sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = 
"sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = "https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = "https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = "https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = 
"2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" }, + { url = "https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = "https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = "https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, 
upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = "https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" 
}, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, + { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, + { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, + { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, + { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, + { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, + { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, + { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = 
"sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, + { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, + { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, + { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, + { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = 
"2025-10-15T23:17:28.06Z" }, + { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, + { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, + { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, + { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, + { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, + { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, + { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, + { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, + { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, + { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, + { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, + { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, + { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, + { url = "https://files.pythonhosted.org/packages/d9/cd/1a8633802d766a0fa46f382a77e096d7e209e0817892929655fe0586ae32/cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32", size = 3689163, upload-time = "2025-10-15T23:18:13.821Z" }, + { url = "https://files.pythonhosted.org/packages/4c/59/6b26512964ace6480c3e54681a9859c974172fb141c38df11eadd8416947/cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c", size = 3429474, upload-time = "2025-10-15T23:18:15.477Z" }, + { url = "https://files.pythonhosted.org/packages/06/8a/e60e46adab4362a682cf142c7dcb5bf79b782ab2199b0dcb81f55970807f/cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea", size = 3698132, upload-time = "2025-10-15T23:18:17.056Z" }, + { url = "https://files.pythonhosted.org/packages/da/38/f59940ec4ee91e93d3311f7532671a5cef5570eb04a144bf203b58552d11/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b", size = 4243992, upload-time = "2025-10-15T23:18:18.695Z" }, + { url = "https://files.pythonhosted.org/packages/b0/0c/35b3d92ddebfdfda76bb485738306545817253d0a3ded0bfe80ef8e67aa5/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb", size = 4409944, upload-time = "2025-10-15T23:18:20.597Z" }, + { url = "https://files.pythonhosted.org/packages/99/55/181022996c4063fc0e7666a47049a1ca705abb9c8a13830f074edb347495/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717", size = 4242957, upload-time = "2025-10-15T23:18:22.18Z" }, + { url = "https://files.pythonhosted.org/packages/ba/af/72cd6ef29f9c5f731251acadaeb821559fe25f10852f44a63374c9ca08c1/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9", size = 4409447, upload-time = "2025-10-15T23:18:24.209Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/c3/e90f4a4feae6410f914f8ebac129b9ae7a8c92eb60a638012dde42030a9d/cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c", size = 3438528, upload-time = "2025-10-15T23:18:26.227Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = 
"2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "google-auth" +version = "2.45.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = 
{ registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + +[[package]] +name = "griffe" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0d/0c/3a471b6e31951dce2360477420d0a8d1e00dea6cf33b70f3e8c3ab6e28e1/griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea", size = 424112, upload-time = "2025-11-10T15:03:15.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/83/3b1d03d36f224edded98e9affd0467630fc09d766c0e56fb1498cbb04a9b/griffe-1.15.0-py3-none-any.whl", hash = "sha256:6f6762661949411031f5fcda9593f586e6ce8340f0ba88921a0f2ef7a81eb9a3", size = 150705, upload-time = "2025-11-10T15:03:13.549Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", 
hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = 
"sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "invoke" +version = "2.2.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = 
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mcp" +version = "1.25.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" }, +] + +[[package]] +name = "mistralai" +version = "2.0.0b1" +source = { editable = "." 
} +dependencies = [ + { name = "eval-type-backport" }, + { name = "httpx" }, + { name = "invoke" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "pyyaml" }, + { name = "typing-inspection" }, +] + +[package.optional-dependencies] +agents = [ + { name = "authlib" }, + { name = "griffe" }, + { name = "mcp" }, +] +gcp = [ + { name = "google-auth" }, + { name = "requests" }, +] +realtime = [ + { name = "websockets" }, +] + +[package.dev-dependencies] +dev = [ + { name = "authlib" }, + { name = "griffe" }, + { name = "mcp" }, + { name = "mypy" }, + { name = "pylint" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "types-authlib" }, + { name = "types-python-dateutil" }, + { name = "types-pyyaml" }, + { name = "websockets" }, +] +lint = [ + { name = "mypy" }, + { name = "pyright" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "authlib", marker = "extra == 'agents'", specifier = ">=1.5.2,<2.0" }, + { name = "eval-type-backport", specifier = ">=0.2.0" }, + { name = "google-auth", marker = "extra == 'gcp'", specifier = ">=2.27.0" }, + { name = "griffe", marker = "extra == 'agents'", specifier = ">=1.7.3,<2.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "invoke", specifier = ">=2.2.0,<3.0.0" }, + { name = "mcp", marker = "extra == 'agents'", specifier = ">=1.0,<2.0" }, + { name = "opentelemetry-api", specifier = ">=1.33.1,<2.0.0" }, + { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, + { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.61" }, + { name = "pydantic", specifier = ">=2.11.2" }, + { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, + 
{ name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, + { name = "typing-inspection", specifier = ">=0.4.0" }, + { name = "websockets", marker = "extra == 'realtime'", specifier = ">=13.0" }, +] +provides-extras = ["gcp", "agents", "realtime"] + +[package.metadata.requires-dev] +dev = [ + { name = "authlib", specifier = ">=1.5.2,<2" }, + { name = "griffe", specifier = ">=1.7.3,<2" }, + { name = "mcp", specifier = ">=1.0,<2" }, + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, + { name = "types-authlib", specifier = ">=1.5.0.20250516,<2" }, + { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, + { name = "types-pyyaml", specifier = ">=6.0.12.20250516,<7" }, + { name = "websockets", specifier = ">=13.0" }, +] +lint = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "ruff", specifier = ">=0.11.10,<0.12" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", 
size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = 
"sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.59b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + 
+[[package]] +name = "protobuf" +version = "6.33.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" }, + { url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" }, + { url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, 
upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = 
"sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = 
"2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", 
size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = 
"colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.407" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/1b/0aa08ee42948b61745ac5b5b5ccaec4669e8884b53d31c8ec20b2fcd6b6f/pyright-1.1.407.tar.gz", hash = "sha256:099674dba5c10489832d4a4b2d302636152a9a42d317986c38474c76fe562262", size = 4122872, upload-time = "2025-10-24T23:17:15.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/93/b69052907d032b00c40cb656d21438ec00b3a471733de137a3f65a49a0a0/pyright-1.1.407-py3-none-any.whl", hash = "sha256:6dd419f54fcc13f03b52285796d65e639786373f433e243f8b94cf93a7444d21", size = 5997008, upload-time = "2025-10-24T23:17:13.159Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.21" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/78/96/804520d0850c7db98e5ccb70282e29208723f0964e88ffd9d0da2f52ea09/python_multipart-0.0.21.tar.gz", hash = "sha256:7137ebd4d3bbf70ea1622998f902b97a29434a9e8dc40eb203bbcf7c2a2cba92", size = 37196, upload-time = "2025-12-17T09:24:22.446Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/40/44efbb0dfbd33aca6a6483191dae0716070ed99e2ecb0c53683f400a0b4f/pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3", size = 8760432, upload-time = "2025-07-14T20:13:05.9Z" }, + { url = "https://files.pythonhosted.org/packages/5e/bf/360243b1e953bd254a82f12653974be395ba880e7ec23e3731d9f73921cc/pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b", size = 9590103, upload-time = "2025-07-14T20:13:07.698Z" }, + { url = "https://files.pythonhosted.org/packages/57/38/d290720e6f138086fb3d5ffe0b6caa019a791dd57866940c82e4eeaf2012/pywin32-311-cp310-cp310-win_arm64.whl", hash = "sha256:0502d1facf1fed4839a9a51ccbcc63d952cf318f78ffc00a7e78528ac27d7a2b", size = 8778557, upload-time = "2025-07-14T20:13:11.11Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/af/449a6a91e5d6db51420875c54f6aff7c97a86a3b13a0b4f1a5c13b988de3/pywin32-311-cp311-cp311-win32.whl", hash = "sha256:184eb5e436dea364dcd3d2316d577d625c0351bf237c4e9a5fabbcfa5a58b151", size = 8697031, upload-time = "2025-07-14T20:13:13.266Z" }, + { url = "https://files.pythonhosted.org/packages/51/8f/9bb81dd5bb77d22243d33c8397f09377056d5c687aa6d4042bea7fbf8364/pywin32-311-cp311-cp311-win_amd64.whl", hash = "sha256:3ce80b34b22b17ccbd937a6e78e7225d80c52f5ab9940fe0506a1a16f3dab503", size = 9508308, upload-time = "2025-07-14T20:13:15.147Z" }, + { url = "https://files.pythonhosted.org/packages/44/7b/9c2ab54f74a138c491aba1b1cd0795ba61f144c711daea84a88b63dc0f6c/pywin32-311-cp311-cp311-win_arm64.whl", hash = "sha256:a733f1388e1a842abb67ffa8e7aad0e70ac519e09b0f6a784e65a136ec7cefd2", size = 8703930, upload-time = "2025-07-14T20:13:16.945Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, 
upload-time = "2025-07-14T20:13:24.682Z" }, + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = 
"sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/a0/39350dd17dd6d6c6507025c0e53aef67a9293a6d37d3511f23ea510d5800/pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b", size = 184227, upload-time = "2025-09-25T21:31:46.04Z" }, + { url = "https://files.pythonhosted.org/packages/05/14/52d505b5c59ce73244f59c7a50ecf47093ce4765f116cdb98286a71eeca2/pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956", size = 174019, upload-time = "2025-09-25T21:31:47.706Z" }, + { url = "https://files.pythonhosted.org/packages/43/f7/0e6a5ae5599c838c696adb4e6330a59f463265bfa1e116cfd1fbb0abaaae/pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8", size = 740646, upload-time = "2025-09-25T21:31:49.21Z" }, + { url = "https://files.pythonhosted.org/packages/2f/3a/61b9db1d28f00f8fd0ae760459a5c4bf1b941baf714e207b6eb0657d2578/pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198", size = 840793, upload-time = "2025-09-25T21:31:50.735Z" }, + { url = "https://files.pythonhosted.org/packages/7a/1e/7acc4f0e74c4b3d9531e24739e0ab832a5edf40e64fbae1a9c01941cabd7/pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b", size = 770293, upload-time = "2025-09-25T21:31:51.828Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ef/abd085f06853af0cd59fa5f913d61a8eab65d7639ff2a658d18a25d6a89d/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0", size = 732872, upload-time = "2025-09-25T21:31:53.282Z" }, + { url = "https://files.pythonhosted.org/packages/1f/15/2bc9c8faf6450a8b3c9fc5448ed869c599c0a74ba2669772b1f3a0040180/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69", size = 758828, upload-time = "2025-09-25T21:31:54.807Z" }, + { url = "https://files.pythonhosted.org/packages/a3/00/531e92e88c00f4333ce359e50c19b8d1de9fe8d581b1534e35ccfbc5f393/pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e", size = 142415, upload-time = "2025-09-25T21:31:55.885Z" }, + { url = "https://files.pythonhosted.org/packages/2a/fa/926c003379b19fca39dd4634818b00dec6c62d87faf628d1394e137354d4/pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c", size = 158561, upload-time = "2025-09-25T21:31:57.406Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" }, + { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" }, + { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" }, + { url = "https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" }, + { url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" }, + { url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" }, + { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" }, + { url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" }, + { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, 
upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = 
"rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/0c/0c411a0ec64ccb6d104dcabe0e713e05e153a9a2c3c2bd2b32ce412166fe/rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288", size = 370490, upload-time = "2025-11-30T20:21:33.256Z" }, + { url = "https://files.pythonhosted.org/packages/19/6a/4ba3d0fb7297ebae71171822554abe48d7cab29c28b8f9f2c04b79988c05/rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00", size = 359751, upload-time = "2025-11-30T20:21:34.591Z" }, + { url = "https://files.pythonhosted.org/packages/cd/7c/e4933565ef7f7a0818985d87c15d9d273f1a649afa6a52ea35ad011195ea/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6", size = 389696, upload-time = "2025-11-30T20:21:36.122Z" }, + { url = "https://files.pythonhosted.org/packages/5e/01/6271a2511ad0815f00f7ed4390cf2567bec1d4b1da39e2c27a41e6e3b4de/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7", size = 403136, upload-time = "2025-11-30T20:21:37.728Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/64/c857eb7cd7541e9b4eee9d49c196e833128a55b89a9850a9c9ac33ccf897/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324", size = 524699, upload-time = "2025-11-30T20:21:38.92Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ed/94816543404078af9ab26159c44f9e98e20fe47e2126d5d32c9d9948d10a/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df", size = 412022, upload-time = "2025-11-30T20:21:40.407Z" }, + { url = "https://files.pythonhosted.org/packages/61/b5/707f6cf0066a6412aacc11d17920ea2e19e5b2f04081c64526eb35b5c6e7/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3", size = 390522, upload-time = "2025-11-30T20:21:42.17Z" }, + { url = "https://files.pythonhosted.org/packages/13/4e/57a85fda37a229ff4226f8cbcf09f2a455d1ed20e802ce5b2b4a7f5ed053/rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221", size = 404579, upload-time = "2025-11-30T20:21:43.769Z" }, + { url = "https://files.pythonhosted.org/packages/f9/da/c9339293513ec680a721e0e16bf2bac3db6e5d7e922488de471308349bba/rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7", size = 421305, upload-time = "2025-11-30T20:21:44.994Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/be/522cb84751114f4ad9d822ff5a1aa3c98006341895d5f084779b99596e5c/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff", size = 572503, upload-time = "2025-11-30T20:21:46.91Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9b/de879f7e7ceddc973ea6e4629e9b380213a6938a249e94b0cdbcc325bb66/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7", size = 598322, upload-time = "2025-11-30T20:21:48.709Z" }, + { url = "https://files.pythonhosted.org/packages/48/ac/f01fc22efec3f37d8a914fc1b2fb9bcafd56a299edbe96406f3053edea5a/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139", size = 560792, upload-time = "2025-11-30T20:21:50.024Z" }, + { url = "https://files.pythonhosted.org/packages/e2/da/4e2b19d0f131f35b6146425f846563d0ce036763e38913d917187307a671/rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464", size = 221901, upload-time = "2025-11-30T20:21:51.32Z" }, + { url = "https://files.pythonhosted.org/packages/96/cb/156d7a5cf4f78a7cc571465d8aec7a3c447c94f6749c5123f08438bcf7bc/rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169", size = 235823, upload-time = "2025-11-30T20:21:52.505Z" }, + { url = "https://files.pythonhosted.org/packages/4d/6e/f964e88b3d2abee2a82c1ac8366da848fce1c6d834dc2132c3fda3970290/rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425", size = 370157, upload-time = "2025-11-30T20:21:53.789Z" }, + { url = "https://files.pythonhosted.org/packages/94/ba/24e5ebb7c1c82e74c4e4f33b2112a5573ddc703915b13a073737b59b86e0/rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d", size = 359676, upload-time = "2025-11-30T20:21:55.475Z" }, + { url = "https://files.pythonhosted.org/packages/84/86/04dbba1b087227747d64d80c3b74df946b986c57af0a9f0c98726d4d7a3b/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4", size = 389938, upload-time = "2025-11-30T20:21:57.079Z" }, + { url = "https://files.pythonhosted.org/packages/42/bb/1463f0b1722b7f45431bdd468301991d1328b16cffe0b1c2918eba2c4eee/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f", size = 402932, upload-time = "2025-11-30T20:21:58.47Z" }, + { url = "https://files.pythonhosted.org/packages/99/ee/2520700a5c1f2d76631f948b0736cdf9b0acb25abd0ca8e889b5c62ac2e3/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4", size = 525830, upload-time = "2025-11-30T20:21:59.699Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ad/bd0331f740f5705cc555a5e17fdf334671262160270962e69a2bdef3bf76/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97", size = 412033, upload-time = "2025-11-30T20:22:00.991Z" }, 
+ { url = "https://files.pythonhosted.org/packages/f8/1e/372195d326549bb51f0ba0f2ecb9874579906b97e08880e7a65c3bef1a99/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89", size = 390828, upload-time = "2025-11-30T20:22:02.723Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/d88bb33294e3e0c76bc8f351a3721212713629ffca1700fa94979cb3eae8/rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d", size = 404683, upload-time = "2025-11-30T20:22:04.367Z" }, + { url = "https://files.pythonhosted.org/packages/50/32/c759a8d42bcb5289c1fac697cd92f6fe01a018dd937e62ae77e0e7f15702/rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038", size = 421583, upload-time = "2025-11-30T20:22:05.814Z" }, + { url = "https://files.pythonhosted.org/packages/2b/81/e729761dbd55ddf5d84ec4ff1f47857f4374b0f19bdabfcf929164da3e24/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7", size = 572496, upload-time = "2025-11-30T20:22:07.713Z" }, + { url = "https://files.pythonhosted.org/packages/14/f6/69066a924c3557c9c30baa6ec3a0aa07526305684c6f86c696b08860726c/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed", size = 598669, upload-time = "2025-11-30T20:22:09.312Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/48/905896b1eb8a05630d20333d1d8ffd162394127b74ce0b0784ae04498d32/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85", size = 561011, upload-time = "2025-11-30T20:22:11.309Z" }, + { url = "https://files.pythonhosted.org/packages/22/16/cd3027c7e279d22e5eb431dd3c0fbc677bed58797fe7581e148f3f68818b/rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c", size = 221406, upload-time = "2025-11-30T20:22:13.101Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5b/e7b7aa136f28462b344e652ee010d4de26ee9fd16f1bfd5811f5153ccf89/rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825", size = 236024, upload-time = "2025-11-30T20:22:14.853Z" }, + { url = "https://files.pythonhosted.org/packages/14/a6/364bba985e4c13658edb156640608f2c9e1d3ea3c81b27aa9d889fff0e31/rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229", size = 229069, upload-time = "2025-11-30T20:22:16.577Z" }, + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = "https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = "https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://files.pythonhosted.org/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, + { url = "https://files.pythonhosted.org/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = 
"2025-11-30T20:22:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = "https://files.pythonhosted.org/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://files.pythonhosted.org/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = "https://files.pythonhosted.org/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, + { url = "https://files.pythonhosted.org/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, + { url = "https://files.pythonhosted.org/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, + { url = "https://files.pythonhosted.org/packages/e0/af/5ab4833eadc36c0a8ed2bc5c0de0493c04f6c06de223170bd0798ff98ced/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2", size = 414589, upload-time = "2025-11-30T20:22:48.872Z" }, + { url = "https://files.pythonhosted.org/packages/b7/de/f7192e12b21b9e9a68a6d0f249b4af3fdcdff8418be0767a627564afa1f1/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6", size = 394025, upload-time = "2025-11-30T20:22:50.196Z" }, + { url = "https://files.pythonhosted.org/packages/91/c4/fc70cd0249496493500e7cc2de87504f5aa6509de1e88623431fec76d4b6/rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e", size = 408895, upload-time = "2025-11-30T20:22:51.87Z" 
}, + { url = "https://files.pythonhosted.org/packages/58/95/d9275b05ab96556fefff73a385813eb66032e4c99f411d0795372d9abcea/rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d", size = 422799, upload-time = "2025-11-30T20:22:53.341Z" }, + { url = "https://files.pythonhosted.org/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, + { url = "https://files.pythonhosted.org/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = "https://files.pythonhosted.org/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://files.pythonhosted.org/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { url = "https://files.pythonhosted.org/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://files.pythonhosted.org/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://files.pythonhosted.org/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, + { url = "https://files.pythonhosted.org/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, + { url = "https://files.pythonhosted.org/packages/3d/55/fa3b9cf31d0c963ecf1ba777f7cf4b2a2c976795ac430d24a1f43d25a6ba/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa", size = 408149, upload-time = "2025-11-30T20:23:12.864Z" }, + { url = "https://files.pythonhosted.org/packages/60/ca/780cf3b1a32b18c0f05c441958d3758f02544f1d613abf9488cd78876378/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083", size = 383843, upload-time = "2025-11-30T20:23:14.638Z" }, + { url = "https://files.pythonhosted.org/packages/82/86/d5f2e04f2aa6247c613da0c1dd87fcd08fa17107e858193566048a1e2f0a/rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9", size = 396507, upload-time = "2025-11-30T20:23:16.105Z" }, + { url = "https://files.pythonhosted.org/packages/4b/9a/453255d2f769fe44e07ea9785c8347edaf867f7026872e76c1ad9f7bed92/rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0", size = 414949, upload-time = "2025-11-30T20:23:17.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, + { url = "https://files.pythonhosted.org/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = "https://files.pythonhosted.org/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = "https://files.pythonhosted.org/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = 
"sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, + { url = "https://files.pythonhosted.org/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, + { url = "https://files.pythonhosted.org/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, + { url = "https://files.pythonhosted.org/packages/5b/3c/2882bdac942bd2172f3da574eab16f309ae10a3925644e969536553cb4ee/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18", size = 408005, upload-time = "2025-11-30T20:23:35.253Z" 
}, + { url = "https://files.pythonhosted.org/packages/ce/81/9a91c0111ce1758c92516a3e44776920b579d9a7c09b2b06b642d4de3f0f/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad", size = 382112, upload-time = "2025-11-30T20:23:36.842Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8e/1da49d4a107027e5fbc64daeab96a0706361a2918da10cb41769244b805d/rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07", size = 399049, upload-time = "2025-11-30T20:23:38.343Z" }, + { url = "https://files.pythonhosted.org/packages/df/5a/7ee239b1aa48a127570ec03becbb29c9d5a9eb092febbd1699d567cae859/rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f", size = 415661, upload-time = "2025-11-30T20:23:40.263Z" }, + { url = "https://files.pythonhosted.org/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, + { url = 
"https://files.pythonhosted.org/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = "https://files.pythonhosted.org/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = "https://files.pythonhosted.org/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = "https://files.pythonhosted.org/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, + { url = "https://files.pythonhosted.org/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, + { url = "https://files.pythonhosted.org/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, + { url = "https://files.pythonhosted.org/packages/ee/ca/be7bca14cf21513bdf9c0606aba17d1f389ea2b6987035eb4f62bd923f25/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419", size = 408340, upload-time = "2025-11-30T20:24:00.2Z" }, + { url = "https://files.pythonhosted.org/packages/c2/c7/736e00ebf39ed81d75544c0da6ef7b0998f8201b369acf842f9a90dc8fce/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551", size = 383765, upload-time = 
"2025-11-30T20:24:01.759Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3f/da50dfde9956aaf365c4adc9533b100008ed31aea635f2b8d7b627e25b49/rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8", size = 396834, upload-time = "2025-11-30T20:24:03.687Z" }, + { url = "https://files.pythonhosted.org/packages/4e/00/34bcc2565b6020eab2623349efbdec810676ad571995911f1abdae62a3a0/rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5", size = 415470, upload-time = "2025-11-30T20:24:05.232Z" }, + { url = "https://files.pythonhosted.org/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, + { url = "https://files.pythonhosted.org/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, + { url = "https://files.pythonhosted.org/packages/69/71/3f34339ee70521864411f8b6992e7ab13ac30d8e4e3309e07c7361767d91/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58", size = 372292, upload-time = "2025-11-30T20:24:16.537Z" }, + { url = "https://files.pythonhosted.org/packages/57/09/f183df9b8f2d66720d2ef71075c59f7e1b336bec7ee4c48f0a2b06857653/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a", size = 362128, upload-time = "2025-11-30T20:24:18.086Z" }, + { url = "https://files.pythonhosted.org/packages/7a/68/5c2594e937253457342e078f0cc1ded3dd7b2ad59afdbf2d354869110a02/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb", size = 391542, upload-time = "2025-11-30T20:24:20.092Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/5c/31ef1afd70b4b4fbdb2800249f34c57c64beb687495b10aec0365f53dfc4/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c", size = 404004, upload-time = "2025-11-30T20:24:22.231Z" }, + { url = "https://files.pythonhosted.org/packages/e3/63/0cfbea38d05756f3440ce6534d51a491d26176ac045e2707adc99bb6e60a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3", size = 527063, upload-time = "2025-11-30T20:24:24.302Z" }, + { url = "https://files.pythonhosted.org/packages/42/e6/01e1f72a2456678b0f618fc9a1a13f882061690893c192fcad9f2926553a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5", size = 413099, upload-time = "2025-11-30T20:24:25.916Z" }, + { url = "https://files.pythonhosted.org/packages/b8/25/8df56677f209003dcbb180765520c544525e3ef21ea72279c98b9aa7c7fb/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738", size = 392177, upload-time = "2025-11-30T20:24:27.834Z" }, + { url = "https://files.pythonhosted.org/packages/4a/b4/0a771378c5f16f8115f796d1f437950158679bcd2a7c68cf251cfb00ed5b/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f", size = 406015, upload-time = "2025-11-30T20:24:29.457Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/d8/456dbba0af75049dc6f63ff295a2f92766b9d521fa00de67a2bd6427d57a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877", size = 423736, upload-time = "2025-11-30T20:24:31.22Z" }, + { url = "https://files.pythonhosted.org/packages/13/64/b4d76f227d5c45a7e0b796c674fd81b0a6c4fbd48dc29271857d8219571c/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a", size = 573981, upload-time = "2025-11-30T20:24:32.934Z" }, + { url = "https://files.pythonhosted.org/packages/20/91/092bacadeda3edf92bf743cc96a7be133e13a39cdbfd7b5082e7ab638406/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4", size = 599782, upload-time = "2025-11-30T20:24:35.169Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b7/b95708304cd49b7b6f82fdd039f1748b66ec2b21d6a45180910802f1abf1/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e", size = 562191, upload-time = "2025-11-30T20:24:36.853Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "ruff" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054, upload-time = "2025-06-05T21:00:15.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516, upload-time = "2025-06-05T20:59:32.944Z" }, + { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083, upload-time = "2025-06-05T20:59:37.03Z" }, + { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024, upload-time = "2025-06-05T20:59:39.741Z" }, + { url = 
"https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324, upload-time = "2025-06-05T20:59:42.185Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416, upload-time = "2025-06-05T20:59:44.319Z" }, + { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197, upload-time = "2025-06-05T20:59:46.935Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615, upload-time = "2025-06-05T20:59:49.534Z" }, + { url = "https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080, upload-time = "2025-06-05T20:59:51.654Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315, upload-time = "2025-06-05T20:59:54.469Z" }, + { url = "https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640, upload-time = "2025-06-05T20:59:56.986Z" }, + { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364, upload-time = "2025-06-05T20:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462, upload-time = "2025-06-05T21:00:01.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028, upload-time = "2025-06-05T21:00:04.06Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992, upload-time = "2025-06-05T21:00:06.249Z" }, + { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944, upload-time = "2025-06-05T21:00:08.459Z" }, + { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669, upload-time = "2025-06-05T21:00:11.147Z" }, + { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928, upload-time = "2025-06-05T21:00:13.758Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "starlette" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/34/f5df66cb383efdbf4f2db23cabb27f51b1dcb737efaf8a558f6f1d195134/sse_starlette-3.1.2.tar.gz", hash = "sha256:55eff034207a83a0eb86de9a68099bd0157838f0b8b999a1b742005c71e33618", size = 26303, upload-time = "2025-12-31T08:02:20.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/95/8c4b76eec9ae574474e5d2997557cebf764bcd3586458956c30631ae08f4/sse_starlette-3.1.2-py3-none-any.whl", hash = "sha256:cd800dd349f4521b317b9391d3796fa97b71748a4da9b9e00aafab32dda375c8", size = 12484, upload-time = "2025-12-31T08:02:18.894Z" }, +] + +[[package]] +name = "starlette" +version = "0.50.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = 
"sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "types-authlib" +version = "1.6.6.20251220" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/c5/ed668b28a66b847903cc94362bf05171a01473e3782e60b362f246c160fe/types_authlib-1.6.6.20251220.tar.gz", hash = "sha256:a2369f23732fe88d5087ed720864f40d0319c19e8411d85a4930e31018996f90", size = 45595, upload-time = "2025-12-20T03:07:43.241Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/ef/e783f3d0d489f7bf66557c3f26cd620e4bae1cba43a59e69c4ced25853bc/types_authlib-1.6.6.20251220-py3-none-any.whl", hash = "sha256:dd1d545fe4c498686c0285d59dc950d87d977d294fc430617c91e0a11f6f4f2b", size = 102884, upload-time = "2025-12-20T03:07:42.028Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20251115" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250915" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522, upload-time = "2025-09-15T03:01:00.728Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = 
"sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, +] + +[[package]] +name = "websockets" +version = "16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/74/221f58decd852f4b59cc3354cccaf87e8ef695fede361d03dc9a7396573b/websockets-16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a", size = 177343, upload-time = "2026-01-10T09:22:21.28Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/22ef6107ee52ab7f0b710d55d36f5a5d3ef19e8a205541a6d7ffa7994e5a/websockets-16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0", size = 175021, upload-time = "2026-01-10T09:22:22.696Z" }, + { url = "https://files.pythonhosted.org/packages/10/40/904a4cb30d9b61c0e278899bf36342e9b0208eb3c470324a9ecbaac2a30f/websockets-16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957", size = 175320, upload-time = "2026-01-10T09:22:23.94Z" }, + { url = "https://files.pythonhosted.org/packages/9d/2f/4b3ca7e106bc608744b1cdae041e005e446124bebb037b18799c2d356864/websockets-16.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72", size = 183815, upload-time = "2026-01-10T09:22:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/26/d40eaa2a46d4302becec8d15b0fc5e45bdde05191e7628405a19cf491ccd/websockets-16.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde", size = 185054, upload-time = "2026-01-10T09:22:27.101Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/ba/6500a0efc94f7373ee8fefa8c271acdfd4dca8bd49a90d4be7ccabfc397e/websockets-16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3", size = 184565, upload-time = "2026-01-10T09:22:28.293Z" }, + { url = "https://files.pythonhosted.org/packages/04/b4/96bf2cee7c8d8102389374a2616200574f5f01128d1082f44102140344cc/websockets-16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3", size = 183848, upload-time = "2026-01-10T09:22:30.394Z" }, + { url = "https://files.pythonhosted.org/packages/02/8e/81f40fb00fd125357814e8c3025738fc4ffc3da4b6b4a4472a82ba304b41/websockets-16.0-cp310-cp310-win32.whl", hash = "sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9", size = 178249, upload-time = "2026-01-10T09:22:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/b4/5f/7e40efe8df57db9b91c88a43690ac66f7b7aa73a11aa6a66b927e44f26fa/websockets-16.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35", size = 178685, upload-time = "2026-01-10T09:22:33.345Z" }, + { url = "https://files.pythonhosted.org/packages/f2/db/de907251b4ff46ae804ad0409809504153b3f30984daf82a1d84a9875830/websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8", size = 177340, upload-time = "2026-01-10T09:22:34.539Z" }, + { url = "https://files.pythonhosted.org/packages/f3/fa/abe89019d8d8815c8781e90d697dec52523fb8ebe308bf11664e8de1877e/websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad", size = 175022, upload-time = "2026-01-10T09:22:36.332Z" }, + { url = "https://files.pythonhosted.org/packages/58/5d/88ea17ed1ded2079358b40d31d48abe90a73c9e5819dbcde1606e991e2ad/websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d", size = 175319, upload-time = "2026-01-10T09:22:37.602Z" }, + { url = "https://files.pythonhosted.org/packages/d2/ae/0ee92b33087a33632f37a635e11e1d99d429d3d323329675a6022312aac2/websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe", size = 184631, upload-time = "2026-01-10T09:22:38.789Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c5/27178df583b6c5b31b29f526ba2da5e2f864ecc79c99dae630a85d68c304/websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b", size = 185870, upload-time = "2026-01-10T09:22:39.893Z" }, + { url = "https://files.pythonhosted.org/packages/87/05/536652aa84ddc1c018dbb7e2c4cbcd0db884580bf8e95aece7593fde526f/websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5", size = 185361, upload-time = "2026-01-10T09:22:41.016Z" }, + { url = "https://files.pythonhosted.org/packages/6d/e2/d5332c90da12b1e01f06fb1b85c50cfc489783076547415bf9f0a659ec19/websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64", size = 184615, upload-time = 
"2026-01-10T09:22:42.442Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/d3f9576691cae9253b51555f841bc6600bf0a983a461c79500ace5a5b364/websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6", size = 178246, upload-time = "2026-01-10T09:22:43.654Z" }, + { url = "https://files.pythonhosted.org/packages/54/67/eaff76b3dbaf18dcddabc3b8c1dba50b483761cccff67793897945b37408/websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac", size = 178684, upload-time = "2026-01-10T09:22:44.941Z" }, + { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" }, + { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" }, + { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" }, + { url = 
"https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" }, + { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" }, + { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" }, + { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" }, + { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" }, + { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" }, + { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" }, + { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" }, + { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" }, + { url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" }, + { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" }, + { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" }, + { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" }, + { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" }, + { url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" }, + { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" }, + { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" }, + { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = 
"sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" }, + { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" }, + { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" }, + { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" }, + { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = 
"2026-01-10T09:23:32.627Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" }, + { url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" }, + { url = "https://files.pythonhosted.org/packages/72/07/c98a68571dcf256e74f1f816b8cc5eae6eb2d3d5cfa44d37f801619d9166/websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d", size = 174947, upload-time = "2026-01-10T09:23:36.166Z" }, + { url = "https://files.pythonhosted.org/packages/7e/52/93e166a81e0305b33fe416338be92ae863563fe7bce446b0f687b9df5aea/websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03", size = 175260, upload-time = "2026-01-10T09:23:37.409Z" }, + { url = "https://files.pythonhosted.org/packages/56/0c/2dbf513bafd24889d33de2ff0368190a0e69f37bcfa19009ef819fe4d507/websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da", size = 176071, upload-time = "2026-01-10T09:23:39.158Z" }, + { url = 
"https://files.pythonhosted.org/packages/a5/8f/aea9c71cc92bf9b6cc0f7f70df8f0b420636b6c96ef4feee1e16f80f75dd/websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c", size = 176968, upload-time = "2026-01-10T09:23:41.031Z" }, + { url = "https://files.pythonhosted.org/packages/9a/3f/f70e03f40ffc9a30d817eef7da1be72ee4956ba8d7255c399a01b135902a/websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767", size = 178735, upload-time = "2026-01-10T09:23:42.259Z" }, + { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +]