diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml
new file mode 100644
index 0000000..3f165a1
--- /dev/null
+++ b/.github/workflows/publish-to-pypi.yml
@@ -0,0 +1,47 @@
+name: Publish Python Package to PyPI
+
+on:
+ push:
+ branches:
+ - main
+
+jobs:
+ build-and-publish:
+ name: Build and publish Python distribution to PyPI
+ runs-on: ubuntu-latest
+ environment: release # Optional: if you have a GitHub environment for releases
+ permissions:
+ id-token: write # Required for trusted publishing, if you choose that route later
+ contents: read # Needed to check out the repository
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.9" # Or another version like "3.x" or based on pyproject.toml
+
+ - name: Install build dependencies
+ run: python -m pip install --upgrade pip build
+
+ - name: Build package
+ run: python -m build
+
+      # This step publishes to TestPyPI. For a production release, comment it out and uncomment the PyPI step below.
+ - name: Publish package to TestPyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: __token__
+ password: ${{ secrets.TEST_PYPI_API_TOKEN }}
+          repository-url: https://test.pypi.org/legacy/
+
+ # - name: Publish package to PyPI
+ # uses: pypa/gh-action-pypi-publish@release/v1
+ # with:
+ # user: __token__
+ # password: ${{ secrets.PYPI_API_TOKEN }}
+ # # Optionally, you can add:
+      #     # repository-url: https://test.pypi.org/legacy/ # To publish to TestPyPI first
+ # # skip_existing: true # To skip publishing if the version already exists
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..86eaa37
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,31 @@
+# NOTE: This file is auto generated by OpenAPI Generator.
+# URL: https://openapi-generator.tech
+#
+# ref: https://docs.gitlab.com/ee/ci/README.html
+# ref: https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Python.gitlab-ci.yml
+
+stages:
+ - test
+
+.pytest:
+ stage: test
+ script:
+ - pip install -r requirements.txt
+ - pip install -r test-requirements.txt
+ - pytest --cov=speechall
+
+pytest-3.7:
+ extends: .pytest
+ image: python:3.7-alpine
+pytest-3.8:
+ extends: .pytest
+ image: python:3.8-alpine
+pytest-3.9:
+ extends: .pytest
+ image: python:3.9-alpine
+pytest-3.10:
+ extends: .pytest
+ image: python:3.10-alpine
+pytest-3.11:
+ extends: .pytest
+ image: python:3.11-alpine
diff --git a/.openapi-generator-ignore b/.openapi-generator-ignore
new file mode 100644
index 0000000..3702dbb
--- /dev/null
+++ b/.openapi-generator-ignore
@@ -0,0 +1,41 @@
+# OpenAPI Generator Ignore
+# Generated by openapi-generator https://github.com/openapitools/openapi-generator
+
+# Use this file to prevent files from being overwritten by the generator.
+# The patterns follow closely to .gitignore or .dockerignore.
+
+# As an example, the C# client generator defines ApiClient.cs.
+# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
+#ApiClient.cs
+
+# You can match any string of characters against a directory, file or extension with a single asterisk (*):
+#foo/*/qux
+# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
+
+# You can recursively match patterns against a directory, file or extension with a double asterisk (**):
+#foo/**/qux
+# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
+
+# You can also negate patterns with an exclamation (!).
+# For example, you can ignore all files in a docs folder with the file extension .md:
+#docs/*.md
+# Then explicitly reverse the ignore rule for a single file:
+#!docs/README.md
+
+# Custom files to preserve during regeneration
+example_transcribe.py
+simple_example.py
+EXAMPLE_README.md
+REGENERATION_GUIDE.md
+pyproject.toml
+uv.lock
+.venv/**
+*.pyc
+__pycache__/**
+Makefile
+regenerate.sh
+fix_transcription_response.py
+.gitignore
+.openapi-generator-ignore
+# Ignore OpenAI-Compatible Speech-to-Text Endpoints
+speechall/api/open_ai_compatible_speech_to_text_api.py
diff --git a/.openapi-generator/FILES b/.openapi-generator/FILES
new file mode 100644
index 0000000..d4489b1
--- /dev/null
+++ b/.openapi-generator/FILES
@@ -0,0 +1,95 @@
+.github/workflows/python.yml
+.gitignore
+.gitlab-ci.yml
+.openapi-generator-ignore
+.travis.yml
+README.md
+docs/BaseTranscriptionConfiguration.md
+docs/CreateReplacementRuleset201Response.md
+docs/CreateReplacementRulesetRequest.md
+docs/ErrorResponse.md
+docs/ExactRule.md
+docs/OpenAICreateTranslationRequestModel.md
+docs/OpenaiCompatibleCreateTranscription200Response.md
+docs/OpenaiCompatibleCreateTranslation200Response.md
+docs/RegexGroupRule.md
+docs/RegexRule.md
+docs/RemoteTranscriptionConfiguration.md
+docs/ReplacementRule.md
+docs/ReplacementRulesApi.md
+docs/SpeechToTextApi.md
+docs/SpeechToTextModel.md
+docs/TranscriptLanguageCode.md
+docs/TranscriptOutputFormat.md
+docs/TranscriptionDetailed.md
+docs/TranscriptionModelIdentifier.md
+docs/TranscriptionOnlyText.md
+docs/TranscriptionProvider.md
+docs/TranscriptionResponse.md
+docs/TranscriptionSegment.md
+docs/TranscriptionWord.md
+git_push.sh
+pyproject.toml
+requirements.txt
+setup.cfg
+setup.py
+speechall/__init__.py
+speechall/api/__init__.py
+speechall/api/replacement_rules_api.py
+speechall/api/speech_to_text_api.py
+speechall/api_client.py
+speechall/api_response.py
+speechall/configuration.py
+speechall/exceptions.py
+speechall/models/__init__.py
+speechall/models/base_transcription_configuration.py
+speechall/models/create_replacement_ruleset201_response.py
+speechall/models/create_replacement_ruleset_request.py
+speechall/models/error_response.py
+speechall/models/exact_rule.py
+speechall/models/open_ai_create_translation_request_model.py
+speechall/models/openai_compatible_create_transcription200_response.py
+speechall/models/openai_compatible_create_translation200_response.py
+speechall/models/regex_group_rule.py
+speechall/models/regex_rule.py
+speechall/models/remote_transcription_configuration.py
+speechall/models/replacement_rule.py
+speechall/models/speech_to_text_model.py
+speechall/models/transcript_language_code.py
+speechall/models/transcript_output_format.py
+speechall/models/transcription_detailed.py
+speechall/models/transcription_model_identifier.py
+speechall/models/transcription_only_text.py
+speechall/models/transcription_provider.py
+speechall/models/transcription_response.py
+speechall/models/transcription_segment.py
+speechall/models/transcription_word.py
+speechall/py.typed
+speechall/rest.py
+test-requirements.txt
+test/__init__.py
+test/test_base_transcription_configuration.py
+test/test_create_replacement_ruleset201_response.py
+test/test_create_replacement_ruleset_request.py
+test/test_error_response.py
+test/test_exact_rule.py
+test/test_open_ai_create_translation_request_model.py
+test/test_openai_compatible_create_transcription200_response.py
+test/test_openai_compatible_create_translation200_response.py
+test/test_regex_group_rule.py
+test/test_regex_rule.py
+test/test_remote_transcription_configuration.py
+test/test_replacement_rule.py
+test/test_replacement_rules_api.py
+test/test_speech_to_text_api.py
+test/test_speech_to_text_model.py
+test/test_transcript_language_code.py
+test/test_transcript_output_format.py
+test/test_transcription_detailed.py
+test/test_transcription_model_identifier.py
+test/test_transcription_only_text.py
+test/test_transcription_provider.py
+test/test_transcription_response.py
+test/test_transcription_segment.py
+test/test_transcription_word.py
+tox.ini
diff --git a/.openapi-generator/VERSION b/.openapi-generator/VERSION
new file mode 100644
index 0000000..eb1dc6a
--- /dev/null
+++ b/.openapi-generator/VERSION
@@ -0,0 +1 @@
+7.13.0
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..d72bfe5
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,17 @@
+# ref: https://docs.travis-ci.com/user/languages/python
+language: python
+python:
+ - "3.7"
+ - "3.8"
+ - "3.9"
+ - "3.10"
+ - "3.11"
+ # uncomment the following if needed
+ #- "3.11-dev" # 3.11 development branch
+ #- "nightly" # nightly build
+# command to install dependencies
+install:
+ - "pip install -r requirements.txt"
+ - "pip install -r test-requirements.txt"
+# command to run tests
+script: pytest --cov=speechall
diff --git a/EXAMPLE_README.md b/EXAMPLE_README.md
new file mode 100644
index 0000000..9471959
--- /dev/null
+++ b/EXAMPLE_README.md
@@ -0,0 +1,126 @@
+# Speechall Python SDK Example
+
+This repository contains a Python SDK for the Speechall API, generated using OpenAPI Generator, with example scripts demonstrating how to use the transcribe endpoint.
+
+## Quick Start
+
+### 1. Install Dependencies
+
+Make sure you have `uv` installed, then run:
+
+```bash
+uv sync
+```
+
+### 2. Set Up Authentication
+
+Set your Speechall API token as an environment variable:
+
+```bash
+export SPEECHALL_API_TOKEN="your-api-token-here"
+```
+
+### 3. Run the Example
+
+```bash
+uv run python example_transcribe.py
+```
+
+## Features Demonstrated
+
+The example script shows how to:
+
+- **List Available Models**: Get all available speech-to-text models and their capabilities
+- **Transcribe Local Files**: Upload and transcribe audio files from your local machine
+- **Transcribe Remote URLs**: Transcribe audio files directly from URLs
+- **Advanced Features**: Use speaker diarization, custom vocabulary, and smart formatting
+
+## Available Models
+
+The SDK supports numerous speech-to-text providers and models, including:
+
+- **OpenAI**: `openai.whisper-1`, `openai.gpt-4o-transcribe`
+- **Deepgram**: `deepgram.nova-2`, `deepgram.nova-3`, `deepgram.whisper-large`
+- **AssemblyAI**: `assemblyai.best`, `assemblyai.nano`
+- **Google**: `google.enhanced`, `google.standard`
+- **Azure**: `azure.standard`
+- **Groq**: `groq.whisper-large-v3`, `groq.whisper-large-v3-turbo`
+- And many more!
+
+## Example Usage
+
+### Basic Transcription
+
+```python
+from speechall import ApiClient, Configuration
+from speechall.api.speech_to_text_api import SpeechToTextApi
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+
+# Set up client
+configuration = Configuration()
+configuration.access_token = "your-api-token"
+api_client = ApiClient(configuration)
+api_instance = SpeechToTextApi(api_client)
+
+# Transcribe audio file
+with open("audio.wav", "rb") as f:
+ result = api_instance.transcribe(
+ model=TranscriptionModelIdentifier.OPENAI_DOT_WHISPER_MINUS_1,
+ body=f.read(),
+ language="en"
+ )
+ print(result)
+```
+
+### Advanced Features
+
+```python
+# Use advanced features like diarization and custom vocabulary
+result = api_instance.transcribe(
+ model=TranscriptionModelIdentifier.DEEPGRAM_DOT_NOVA_MINUS_2,
+ body=audio_data,
+ language="en",
+ output_format="verbose_json",
+ diarization=True,
+ custom_vocabulary=["technical", "terms"],
+ speakers_expected=2
+)
+```
+
+## Supported Audio Formats
+
+The API supports various audio formats including:
+- WAV
+- MP3
+- FLAC
+- OGG
+- M4A
+- And more (depends on the selected model/provider)
+
+## Error Handling
+
+The SDK includes proper error handling for common scenarios:
+
+```python
+from speechall.exceptions import ApiException
+
+try:
+ result = api_instance.transcribe(...)
+except ApiException as e:
+ print(f"API Error: {e}")
+except Exception as e:
+ print(f"Unexpected error: {e}")
+```
+
+## Next Steps
+
+1. Get your API token from the Speechall dashboard
+2. Replace the example audio file path with your actual audio file
+3. Experiment with different models and parameters
+4. Check the [Speechall API documentation](https://docs.speechall.com) for more details
+
+## Support
+
+For support and questions:
+- Check the [Speechall documentation](https://docs.speechall.com)
+- Contact support at team@speechall.com
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..c4a1e70
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,104 @@
+# Speechall Python SDK Makefile
+
+# Configuration
+OPENAPI_SPEC_PATH = ../speechall-openapi/openapi.yaml
+GENERATOR = python-pydantic-v1
+OUTPUT_DIR = .
+
+.PHONY: help install regenerate test example clean
+
+# Default target
+help:
+ @echo "๐๏ธ Speechall Python SDK Commands"
+ @echo "=================================="
+ @echo ""
+ @echo "๐ฆ Setup:"
+ @echo " make install - Install dependencies with uv"
+ @echo ""
+ @echo "๐ Code generation:"
+ @echo " make regenerate - Regenerate client from OpenAPI spec"
+ @echo " make force-regen - Force regenerate (skip validation)"
+ @echo " make fix - Apply TranscriptionResponse oneOf fix"
+ @echo ""
+ @echo "๐งช Testing:"
+ @echo " make test - Run tests"
+ @echo " make example - Run example script"
+ @echo ""
+ @echo "๐งน Cleanup:"
+ @echo " make clean - Clean generated files and cache"
+
+# Install dependencies
+install:
+ @echo "๐ฆ Installing dependencies..."
+ uv sync
+
+# Regenerate client from OpenAPI spec
+regenerate:
+ @echo "๐ Regenerating OpenAPI client..."
+ @if [ ! -f "$(OPENAPI_SPEC_PATH)" ]; then \
+ echo "โ Error: OpenAPI spec not found at $(OPENAPI_SPEC_PATH)"; \
+ echo "Please ensure the speechall-openapi repository is cloned"; \
+ exit 1; \
+ fi
+ ./regenerate.sh
+
+# Force regenerate (for development)
+force-regen:
+ @echo "๐ Force regenerating OpenAPI client..."
+ openapi-generator generate \
+ -i $(OPENAPI_SPEC_PATH) \
+ -g $(GENERATOR) \
+ -o $(OUTPUT_DIR) \
+ --skip-validate-spec
+ @echo "โ ๏ธ Note: This may overwrite custom files!"
+
+# Apply TranscriptionResponse oneOf fix
+fix:
+ @echo "๐ง Applying TranscriptionResponse oneOf fix..."
+ python3 fix_transcription_response.py
+
+# Run tests
+test:
+ @echo "๐งช Running tests..."
+ uv run python -m pytest test/ -v
+
+# Run example script
+example:
+ @echo "๐ค Running example script..."
+ @if [ -z "$$SPEECHALL_API_TOKEN" ]; then \
+ echo "โ ๏ธ Warning: SPEECHALL_API_TOKEN not set"; \
+ echo "Set it with: export SPEECHALL_API_TOKEN='your-token'"; \
+ fi
+ uv run python example_transcribe.py
+
+# Run simple example
+simple:
+ @echo "๐ค Running simple example..."
+ @if [ -z "$$SPEECHALL_API_TOKEN" ]; then \
+ echo "โ ๏ธ Warning: SPEECHALL_API_TOKEN not set"; \
+ echo "Set it with: export SPEECHALL_API_TOKEN='your-token'"; \
+ fi
+ uv run python simple_example.py
+
+# Clean up generated files and cache
+clean:
+ @echo "๐งน Cleaning up..."
+ find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
+ find . -name "*.pyc" -delete 2>/dev/null || true
+ rm -rf backup_*/ 2>/dev/null || true
+	@echo "✅ Cleanup complete"
+
+# Development helpers
+check-spec:
+ @echo "๐ Checking OpenAPI specification..."
+ @if [ -f "$(OPENAPI_SPEC_PATH)" ]; then \
+		echo "✅ OpenAPI spec found at $(OPENAPI_SPEC_PATH)"; \
+ openapi-generator validate -i $(OPENAPI_SPEC_PATH) || echo "โ ๏ธ Validation warnings found"; \
+ else \
+ echo "โ OpenAPI spec not found at $(OPENAPI_SPEC_PATH)"; \
+ fi
+
+# List available models (requires API token)
+list-models:
+ @echo "๐ Listing available models..."
+	@uv run python -c "import os; from speechall import *; from speechall.api.speech_to_text_api import SpeechToTextApi; config = Configuration(); config.access_token = os.getenv('SPEECHALL_API_TOKEN'); api = SpeechToTextApi(ApiClient(config)); [print(f'{m.model_id}: {m.display_name}') for m in api.list_speech_to_text_models()[:10]]" 2>/dev/null || echo "❌ Failed to list models (check your API token)"
\ No newline at end of file
diff --git a/README.md b/README.md
index 0862491..8a1c1ab 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,150 @@
-# speechall-python-sdk
-Python SDK for the Speechall API - The unified speech-to-text service
+# speechall
+The Speechall REST API provides powerful and flexible speech-to-text capabilities.
+It allows you to transcribe audio files using various underlying STT providers and models,
+optionally apply custom text replacement rules, and access results in multiple formats.
+The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+
+This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
+
+- API version: 0.1.0
+- Package version: 0.1.0
+- Generator version: 7.13.0
+- Build package: org.openapitools.codegen.languages.PythonPydanticV1ClientCodegen
+For more information, please visit [https://speechall.com/contact](https://speechall.com/contact)
+
+## Requirements.
+
+Python 3.7+
+
+## Installation & Usage
+### pip install
+
+If the python package is hosted on a repository, you can install directly using:
+
+```sh
+pip install git+https://github.com/GIT_USER_ID/GIT_REPO_ID.git
+```
+(you may need to run `pip` with root permission: `sudo pip install git+https://github.com/GIT_USER_ID/GIT_REPO_ID.git`)
+
+Then import the package:
+```python
+import speechall
+```
+
+### Setuptools
+
+Install via [Setuptools](http://pypi.python.org/pypi/setuptools).
+
+```sh
+python setup.py install --user
+```
+(or `sudo python setup.py install` to install the package for all users)
+
+Then import the package:
+```python
+import speechall
+```
+
+### Tests
+
+Execute `pytest` to run the tests.
+
+## Getting Started
+
+Please follow the [installation procedure](#installation--usage) and then run the following:
+
+```python
+
+import os
+import time
+import speechall
+from speechall.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to https://api.speechall.com/v1
+# See configuration.py for a list of all supported configuration parameters.
+configuration = speechall.Configuration(
+ host = "https://api.speechall.com/v1"
+)
+
+# The client must configure the authentication and authorization parameters
+# in accordance with the API server security policy.
+# Examples for each auth method are provided below, use the example that
+# satisfies your auth use case.
+
+# Configure Bearer authorization (API Key): bearerAuth
+configuration = speechall.Configuration(
+ access_token = os.environ["BEARER_TOKEN"]
+)
+
+
+# Enter a context with an instance of the API client
+with speechall.ApiClient(configuration) as api_client:
+ # Create an instance of the API class
+ api_instance = speechall.ReplacementRulesApi(api_client)
+    create_replacement_ruleset_request = {"name":"Acme Corp Corrections","rules":[{"kind":"exact","search":"speechal","replacement":"Speechall","caseSensitive":False},{"kind":"regex","pattern":"\\b(\\d{3})-(\\d{2})-(\\d{4})\\b","replacement":"[REDACTED SSN]","flags":["i"]}]} # CreateReplacementRulesetRequest | JSON object containing the name for the ruleset and an array of replacement rule objects.
+
+ try:
+ # Create a reusable set of text replacement rules.
+ api_response = api_instance.create_replacement_ruleset(create_replacement_ruleset_request)
+ print("The response of ReplacementRulesApi->create_replacement_ruleset:\n")
+ pprint(api_response)
+ except ApiException as e:
+ print("Exception when calling ReplacementRulesApi->create_replacement_ruleset: %s\n" % e)
+
+```
+
+## Documentation for API Endpoints
+
+All URIs are relative to *https://api.speechall.com/v1*
+
+Class | Method | HTTP request | Description
+------------ | ------------- | ------------- | -------------
+*ReplacementRulesApi* | [**create_replacement_ruleset**](docs/ReplacementRulesApi.md#create_replacement_ruleset) | **POST** /replacement-rulesets | Create a reusable set of text replacement rules.
+*SpeechToTextApi* | [**list_speech_to_text_models**](docs/SpeechToTextApi.md#list_speech_to_text_models) | **GET** /speech-to-text-models | Retrieve a list of all available speech-to-text models.
+*SpeechToTextApi* | [**transcribe**](docs/SpeechToTextApi.md#transcribe) | **POST** /transcribe | Upload an audio file directly and receive a transcription.
+*SpeechToTextApi* | [**transcribe_remote**](docs/SpeechToTextApi.md#transcribe_remote) | **POST** /transcribe-remote | Transcribe an audio file located at a remote URL.
+
+
+## Documentation For Models
+
+ - [BaseTranscriptionConfiguration](docs/BaseTranscriptionConfiguration.md)
+ - [CreateReplacementRuleset201Response](docs/CreateReplacementRuleset201Response.md)
+ - [CreateReplacementRulesetRequest](docs/CreateReplacementRulesetRequest.md)
+ - [ErrorResponse](docs/ErrorResponse.md)
+ - [ExactRule](docs/ExactRule.md)
+ - [OpenAICreateTranslationRequestModel](docs/OpenAICreateTranslationRequestModel.md)
+ - [OpenaiCompatibleCreateTranscription200Response](docs/OpenaiCompatibleCreateTranscription200Response.md)
+ - [OpenaiCompatibleCreateTranslation200Response](docs/OpenaiCompatibleCreateTranslation200Response.md)
+ - [RegexGroupRule](docs/RegexGroupRule.md)
+ - [RegexRule](docs/RegexRule.md)
+ - [RemoteTranscriptionConfiguration](docs/RemoteTranscriptionConfiguration.md)
+ - [ReplacementRule](docs/ReplacementRule.md)
+ - [SpeechToTextModel](docs/SpeechToTextModel.md)
+ - [TranscriptLanguageCode](docs/TranscriptLanguageCode.md)
+ - [TranscriptOutputFormat](docs/TranscriptOutputFormat.md)
+ - [TranscriptionDetailed](docs/TranscriptionDetailed.md)
+ - [TranscriptionModelIdentifier](docs/TranscriptionModelIdentifier.md)
+ - [TranscriptionOnlyText](docs/TranscriptionOnlyText.md)
+ - [TranscriptionProvider](docs/TranscriptionProvider.md)
+ - [TranscriptionResponse](docs/TranscriptionResponse.md)
+ - [TranscriptionSegment](docs/TranscriptionSegment.md)
+ - [TranscriptionWord](docs/TranscriptionWord.md)
+
+
+
+## Documentation For Authorization
+
+
+Authentication schemes defined for the API:
+
+### bearerAuth
+
+- **Type**: Bearer authentication (API Key)
+
+
+## Author
+
+
+
+
diff --git a/REGENERATION_GUIDE.md b/REGENERATION_GUIDE.md
new file mode 100644
index 0000000..82a15c5
--- /dev/null
+++ b/REGENERATION_GUIDE.md
@@ -0,0 +1,290 @@
+# OpenAPI Client Regeneration Guide
+
+This guide explains how to regenerate the Speechall Python SDK when the OpenAPI specification changes.
+
+## Quick Start
+
+### Method 1: Using the Script (Recommended)
+```bash
+./regenerate.sh
+```
+
+### Method 2: Using Make
+```bash
+make regenerate
+```
+
+### Method 3: Manual Command
+```bash
+openapi-generator generate -i ../speechall-openapi/openapi.yaml -g python-pydantic-v1 -o .
+```
+
+## Prerequisites
+
+### 1. OpenAPI Generator
+Install the OpenAPI Generator:
+
+```bash
+# Using npm (recommended)
+npm install @openapitools/openapi-generator-cli -g
+
+# Using brew (macOS)
+brew install openapi-generator
+
+# Using docker (alternative)
+# See: https://openapi-generator.tech/docs/installation
+```
+
+### 2. OpenAPI Specification
+Ensure the OpenAPI specification is available at:
+```
+../speechall-openapi/openapi.yaml
+```
+
+Or clone the repository:
+```bash
+cd ..
+git clone https://github.com/speechall/speechall-openapi.git
+cd speechall-python-sdk
+```
+
+## Protected Files
+
+The following files are protected from regeneration and will be preserved:
+
+### Custom Code Files
+- `example_transcribe.py` - Comprehensive example script
+- `simple_example.py` - Simple example script
+- `EXAMPLE_README.md` - Examples documentation
+- `REGENERATION_GUIDE.md` - This guide
+
+### Configuration Files
+- `pyproject.toml` - Modified for uv package management
+- `uv.lock` - Dependency lock file
+- `.venv/` - Virtual environment
+- `Makefile` - Build automation
+- `regenerate.sh` - Regeneration script
+
+### Automatic Fix Scripts
+- `fix_transcription_response.py` - Automatically fixes the oneOf validation issue in TranscriptionResponse
+
+### Generated Files (Will Be Regenerated)
+- `speechall/` - All client code
+- `docs/` - API documentation
+- `test/` - Generated test files
+- `requirements.txt` - Requirements file
+- `setup.py` - Setup script
+- `README.md` - Generated README
+
+## Regeneration Workflow
+
+### Step 1: Backup Custom Changes
+The regeneration script automatically creates backups:
+```bash
+backup_YYYYMMDD_HHMMSS/
+โโโ example_transcribe.py
+โโโ simple_example.py
+โโโ EXAMPLE_README.md
+โโโ pyproject.toml
+```
+
+### Step 2: Regenerate Client Code
+The script runs:
+```bash
+openapi-generator generate \
+ -i ../speechall-openapi/openapi.yaml \
+ -g python-pydantic-v1 \
+ -o . \
+ --skip-validate-spec
+```
+
+### Step 3: Restore Custom Configuration
+- Restores the custom `pyproject.toml` for uv compatibility
+- Keeps example scripts intact
+- Preserves custom documentation
+
+### Step 3.5: Apply Automatic Fixes
+- Runs `fix_transcription_response.py` to automatically fix the oneOf validation issue
+- Ensures the generated code works correctly without manual intervention
+
+### Step 4: Update Dependencies
+```bash
+uv sync # or pip install -r requirements.txt
+```
+
+## Advanced Usage
+
+### Force Regeneration
+To regenerate without safeguards (โ ๏ธ **USE WITH CAUTION**):
+```bash
+make force-regen
+```
+
+### Custom OpenAPI Spec Location
+Edit the script or Makefile to change the spec path:
+```bash
+# In regenerate.sh or Makefile
+OPENAPI_SPEC_PATH="path/to/your/openapi.yaml"
+```
+
+### Different Generator
+To use a different generator:
+```bash
+openapi-generator generate \
+ -i ../speechall-openapi/openapi.yaml \
+ -g python \
+ -o .
+```
+
+Available Python generators:
+- `python` - Standard Python client
+- `python-pydantic-v1` - Pydantic v1 models (current)
+- `python-fastapi` - FastAPI compatible
+- `python-flask` - Flask compatible
+
+## Testing After Regeneration
+
+### 1. Verify Installation
+```bash
+uv sync
+```
+
+### 2. Test Imports
+```bash
+uv run python -c "from speechall.api.speech_to_text_api import SpeechToTextApi; print('✅ Imports work!')"
+```
+
+### 3. Run Examples
+```bash
+# Set your API token first
+export SPEECHALL_API_TOKEN="your-token-here"
+
+# Run examples
+make example
+make simple
+```
+
+### 4. Check for Breaking Changes
+Review the generated code for:
+- New or removed models
+- Changed method signatures
+- New API endpoints
+- Deprecated features
+
+## Common Issues & Solutions
+
+### Issue: "openapi-generator command not found"
+**Solution:** Install OpenAPI Generator:
+```bash
+npm install @openapitools/openapi-generator-cli -g
+```
+
+### Issue: "OpenAPI spec not found"
+**Solution:** Ensure the spec file exists:
+```bash
+ls -la ../speechall-openapi/openapi.yaml
+```
+
+### Issue: Custom pyproject.toml overwritten
+**Solution:** The regeneration script should handle this automatically. If not:
+```bash
+# Restore from backup
+cp backup_*/pyproject.toml ./pyproject.toml
+uv sync
+```
+
+### Issue: Import errors after regeneration
+**Solution:** Reinstall dependencies:
+```bash
+uv sync
+# or
+pip install -r requirements.txt
+```
+
+### Issue: "Multiple matches found when deserializing TranscriptionResponse"
+**Problem:** When one model is a superset of another (e.g., `TranscriptionDetailed` contains all fields of `TranscriptionOnlyText` plus optional ones), the generated oneOf validation fails because both schemas match.
+
+**Automatic Solution:** The regeneration script now automatically applies the fix via `fix_transcription_response.py`. No manual intervention required!
+
+**Manual Solution (if automatic fix fails):** Run the fix script manually:
+```bash
+python3 fix_transcription_response.py
+```
+
+The fix script:
+- Detects if the fix is already applied to avoid duplicate changes
+- Modifies the `from_json` method to try the more specific schema first
+- Updates the validator to prevent "multiple matches" errors
+- Is preserved during regeneration (listed in `.openapi-generator-ignore`)
+
+## Best Practices
+
+### 1. Always Use Version Control
+Commit your changes before regenerating:
+```bash
+git add .
+git commit -m "Before regenerating OpenAPI client"
+./regenerate.sh
+```
+
+### 2. Test Thoroughly
+After regeneration, test all your custom code:
+- Run example scripts
+- Test API calls
+- Verify model compatibility
+
+### 3. Update Examples
+If new features are added to the API:
+- Update example scripts to showcase new capabilities
+- Add new models to the examples
+- Update documentation
+
+### 4. Handle Breaking Changes
+- Check the API changelog
+- Update method calls if signatures changed
+- Add migration notes for users
+
+## Automation Options
+
+### GitHub Actions
+Create `.github/workflows/regenerate.yml`:
+```yaml
+name: Regenerate Client
+on:
+ repository_dispatch:
+ types: [openapi-updated]
+
+jobs:
+ regenerate:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup Node.js
+ uses: actions/setup-node@v3
+ with:
+ node-version: '18'
+ - name: Install OpenAPI Generator
+ run: npm install @openapitools/openapi-generator-cli -g
+ - name: Regenerate Client
+ run: ./regenerate.sh
+ - name: Create Pull Request
+ # Use a PR creation action
+```
+
+### Pre-commit Hook
+Add to `.git/hooks/pre-commit`:
+```bash
+#!/bin/bash
+# Check if OpenAPI spec has changed
+if git diff --cached --name-only | grep -q "../speechall-openapi/openapi.yaml"; then
+ echo "โ ๏ธ OpenAPI spec changed. Consider regenerating the client."
+ echo "Run: ./regenerate.sh"
+fi
+```
+
+## Support
+
+If you encounter issues with regeneration:
+1. Check this guide for common solutions
+2. Review the OpenAPI Generator documentation
+3. Contact the Speechall team for API-specific questions
\ No newline at end of file
diff --git a/docs/BaseTranscriptionConfiguration.md b/docs/BaseTranscriptionConfiguration.md
new file mode 100644
index 0000000..68b4e35
--- /dev/null
+++ b/docs/BaseTranscriptionConfiguration.md
@@ -0,0 +1,40 @@
+# BaseTranscriptionConfiguration
+
+Common configuration options for transcription, applicable to both direct uploads and remote URLs.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**model** | [**TranscriptionModelIdentifier**](TranscriptionModelIdentifier.md) | |
+**language** | [**TranscriptLanguageCode**](TranscriptLanguageCode.md) | | [optional]
+**output_format** | [**TranscriptOutputFormat**](TranscriptOutputFormat.md) | | [optional]
+**ruleset_id** | **str** | The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. | [optional]
+**punctuation** | **bool** | Whether to add punctuation. Support varies by model (e.g., Deepgram, AssemblyAI). Defaults to `true`. | [optional] [default to True]
+**timestamp_granularity** | **str** | Level of timestamp detail (`word` or `segment`). Defaults to `segment`. | [optional] [default to 'segment']
+**diarization** | **bool** | Enable speaker diarization. Defaults to `false`. | [optional] [default to False]
+**initial_prompt** | **str** | Optional text prompt to guide the transcription model. Support varies (e.g., OpenAI). | [optional]
+**temperature** | **float** | Controls output randomness for supported models (e.g., OpenAI). Value between 0 and 1. | [optional]
+**smart_format** | **bool** | Enable provider-specific smart formatting (e.g., Deepgram). Defaults vary. | [optional]
+**speakers_expected** | **int** | Hint for the number of expected speakers for diarization (e.g., RevAI, Deepgram). | [optional]
+**custom_vocabulary** | **List[str]** | List of custom words/phrases to improve recognition (e.g., Deepgram, AssemblyAI). | [optional]
+
+## Example
+
+```python
+from speechall.models.base_transcription_configuration import BaseTranscriptionConfiguration
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of BaseTranscriptionConfiguration from a JSON string
+base_transcription_configuration_instance = BaseTranscriptionConfiguration.from_json(json)
+# print the JSON string representation of the object
+print(base_transcription_configuration_instance.to_json())
+
+# convert the object into a dict
+base_transcription_configuration_dict = base_transcription_configuration_instance.to_dict()
+# create an instance of BaseTranscriptionConfiguration from a dict
+base_transcription_configuration_from_dict = BaseTranscriptionConfiguration.from_dict(base_transcription_configuration_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/CreateReplacementRuleset201Response.md b/docs/CreateReplacementRuleset201Response.md
new file mode 100644
index 0000000..c5857d0
--- /dev/null
+++ b/docs/CreateReplacementRuleset201Response.md
@@ -0,0 +1,28 @@
+# CreateReplacementRuleset201Response
+
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**id** | **str** | The unique identifier (UUID) generated for this ruleset. Use this ID in the `ruleset_id` parameter of transcription requests. |
+
+## Example
+
+```python
+from speechall.models.create_replacement_ruleset201_response import CreateReplacementRuleset201Response
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of CreateReplacementRuleset201Response from a JSON string
+create_replacement_ruleset201_response_instance = CreateReplacementRuleset201Response.from_json(json)
+# print the JSON string representation of the object
+print(create_replacement_ruleset201_response_instance.to_json())
+
+# convert the object into a dict
+create_replacement_ruleset201_response_dict = create_replacement_ruleset201_response_instance.to_dict()
+# create an instance of CreateReplacementRuleset201Response from a dict
+create_replacement_ruleset201_response_from_dict = CreateReplacementRuleset201Response.from_dict(create_replacement_ruleset201_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/CreateReplacementRulesetRequest.md b/docs/CreateReplacementRulesetRequest.md
new file mode 100644
index 0000000..ad8a4af
--- /dev/null
+++ b/docs/CreateReplacementRulesetRequest.md
@@ -0,0 +1,29 @@
+# CreateReplacementRulesetRequest
+
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**name** | **str** | A user-defined name for this ruleset for easier identification. |
+**rules** | [**List[ReplacementRule]**](ReplacementRule.md) | An ordered array of replacement rules. Rules are applied in the order they appear in this list. See the `ReplacementRule` schema for different rule types (exact, regex, regex_group). |
+
+## Example
+
+```python
+from speechall.models.create_replacement_ruleset_request import CreateReplacementRulesetRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of CreateReplacementRulesetRequest from a JSON string
+create_replacement_ruleset_request_instance = CreateReplacementRulesetRequest.from_json(json)
+# print the JSON string representation of the object
+print(create_replacement_ruleset_request_instance.to_json())
+
+# convert the object into a dict
+create_replacement_ruleset_request_dict = create_replacement_ruleset_request_instance.to_dict()
+# create an instance of CreateReplacementRulesetRequest from a dict
+create_replacement_ruleset_request_from_dict = CreateReplacementRulesetRequest.from_dict(create_replacement_ruleset_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/ErrorResponse.md b/docs/ErrorResponse.md
new file mode 100644
index 0000000..234f562
--- /dev/null
+++ b/docs/ErrorResponse.md
@@ -0,0 +1,29 @@
+# ErrorResponse
+
+Standard structure for error responses. May include additional properties depending on the error type.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**message** | **str** | A human-readable message describing the error. |
+
+## Example
+
+```python
+from speechall.models.error_response import ErrorResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of ErrorResponse from a JSON string
+error_response_instance = ErrorResponse.from_json(json)
+# print the JSON string representation of the object
+print(error_response_instance.to_json())
+
+# convert the object into a dict
+error_response_dict = error_response_instance.to_dict()
+# create an instance of ErrorResponse from a dict
+error_response_from_dict = ErrorResponse.from_dict(error_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/ExactRule.md b/docs/ExactRule.md
new file mode 100644
index 0000000..59ad5e3
--- /dev/null
+++ b/docs/ExactRule.md
@@ -0,0 +1,32 @@
+# ExactRule
+
+Defines a replacement rule based on finding an exact string match.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**kind** | **str** | Discriminator field identifying the rule type as 'exact'. |
+**search** | **str** | The exact text string to search for within the transcription. |
+**replacement** | **str** | The text string to replace the found 'search' text with. |
+**case_sensitive** | **bool** | If true, the search will match only if the case is identical. If false (default), the search ignores case. | [optional] [default to False]
+
+## Example
+
+```python
+from speechall.models.exact_rule import ExactRule
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of ExactRule from a JSON string
+exact_rule_instance = ExactRule.from_json(json)
+# print the JSON string representation of the object
+print(exact_rule_instance.to_json())
+
+# convert the object into a dict
+exact_rule_dict = exact_rule_instance.to_dict()
+# create an instance of ExactRule from a dict
+exact_rule_from_dict = ExactRule.from_dict(exact_rule_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/OpenAICreateTranslationRequestModel.md b/docs/OpenAICreateTranslationRequestModel.md
new file mode 100644
index 0000000..e9dc1b7
--- /dev/null
+++ b/docs/OpenAICreateTranslationRequestModel.md
@@ -0,0 +1,28 @@
+# OpenAICreateTranslationRequestModel
+
+ID of the model to use. It follows the naming convention provider/model-name
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+
+## Example
+
+```python
+from speechall.models.open_ai_create_translation_request_model import OpenAICreateTranslationRequestModel
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of OpenAICreateTranslationRequestModel from a JSON string
+open_ai_create_translation_request_model_instance = OpenAICreateTranslationRequestModel.from_json(json)
+# print the JSON string representation of the object
+print(open_ai_create_translation_request_model_instance.to_json())
+
+# convert the object into a dict
+open_ai_create_translation_request_model_dict = open_ai_create_translation_request_model_instance.to_dict()
+# create an instance of OpenAICreateTranslationRequestModel from a dict
+open_ai_create_translation_request_model_from_dict = OpenAICreateTranslationRequestModel.from_dict(open_ai_create_translation_request_model_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/OpenaiCompatibleCreateTranscription200Response.md b/docs/OpenaiCompatibleCreateTranscription200Response.md
new file mode 100644
index 0000000..10aa712
--- /dev/null
+++ b/docs/OpenaiCompatibleCreateTranscription200Response.md
@@ -0,0 +1,32 @@
+# OpenaiCompatibleCreateTranscription200Response
+
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**language** | **str** | The language of the input audio. |
+**duration** | **float** | The duration of the input audio. |
+**text** | **str** | The transcribed text. |
+**words** | [**List[OpenAITranscriptionWord]**](OpenAITranscriptionWord.md) | Extracted words and their corresponding timestamps. | [optional]
+**segments** | [**List[OpenAITranscriptionSegment]**](OpenAITranscriptionSegment.md) | Segments of the transcribed text and their corresponding details. | [optional]
+
+## Example
+
+```python
+from speechall.models.openai_compatible_create_transcription200_response import OpenaiCompatibleCreateTranscription200Response
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of OpenaiCompatibleCreateTranscription200Response from a JSON string
+openai_compatible_create_transcription200_response_instance = OpenaiCompatibleCreateTranscription200Response.from_json(json)
+# print the JSON string representation of the object
+print(openai_compatible_create_transcription200_response_instance.to_json())
+
+# convert the object into a dict
+openai_compatible_create_transcription200_response_dict = openai_compatible_create_transcription200_response_instance.to_dict()
+# create an instance of OpenaiCompatibleCreateTranscription200Response from a dict
+openai_compatible_create_transcription200_response_from_dict = OpenaiCompatibleCreateTranscription200Response.from_dict(openai_compatible_create_transcription200_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/OpenaiCompatibleCreateTranslation200Response.md b/docs/OpenaiCompatibleCreateTranslation200Response.md
new file mode 100644
index 0000000..2afe8d3
--- /dev/null
+++ b/docs/OpenaiCompatibleCreateTranslation200Response.md
@@ -0,0 +1,31 @@
+# OpenaiCompatibleCreateTranslation200Response
+
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**language** | **str** | The language of the output translation (always `english`). |
+**duration** | **str** | The duration of the input audio. |
+**text** | **str** | |
+**segments** | [**List[OpenAITranscriptionSegment]**](OpenAITranscriptionSegment.md) | Segments of the translated text and their corresponding details. | [optional]
+
+## Example
+
+```python
+from speechall.models.openai_compatible_create_translation200_response import OpenaiCompatibleCreateTranslation200Response
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of OpenaiCompatibleCreateTranslation200Response from a JSON string
+openai_compatible_create_translation200_response_instance = OpenaiCompatibleCreateTranslation200Response.from_json(json)
+# print the JSON string representation of the object
+print(openai_compatible_create_translation200_response_instance.to_json())
+
+# convert the object into a dict
+openai_compatible_create_translation200_response_dict = openai_compatible_create_translation200_response_instance.to_dict()
+# create an instance of OpenaiCompatibleCreateTranslation200Response from a dict
+openai_compatible_create_translation200_response_from_dict = OpenaiCompatibleCreateTranslation200Response.from_dict(openai_compatible_create_translation200_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/RegexGroupRule.md b/docs/RegexGroupRule.md
new file mode 100644
index 0000000..072bc8d
--- /dev/null
+++ b/docs/RegexGroupRule.md
@@ -0,0 +1,32 @@
+# RegexGroupRule
+
+Defines a replacement rule that uses regex capture groups to apply different replacements to different parts of the matched text.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**kind** | **str** | Discriminator field identifying the rule type as 'regex_group'. |
+**pattern** | **str** | The regular expression pattern containing capture groups `(...)`. The entire pattern must match for replacements to occur. |
+**group_replacements** | **Dict[str, str]** | An object where keys are capture group numbers (as strings, e.g., \"1\", \"2\") and values are the respective replacement strings for those groups. Groups not listed are kept as matched. The entire match is reconstructed using these replacements. |
+**flags** | **List[str]** | An array of flags to modify the regex behavior. | [optional]
+
+## Example
+
+```python
+from speechall.models.regex_group_rule import RegexGroupRule
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of RegexGroupRule from a JSON string
+regex_group_rule_instance = RegexGroupRule.from_json(json)
+# print the JSON string representation of the object
+print RegexGroupRule.to_json()
+
+# convert the object into a dict
+regex_group_rule_dict = regex_group_rule_instance.to_dict()
+# create an instance of RegexGroupRule from a dict
+regex_group_rule_from_dict = RegexGroupRule.from_dict(regex_group_rule_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/RegexRule.md b/docs/RegexRule.md
new file mode 100644
index 0000000..f371cad
--- /dev/null
+++ b/docs/RegexRule.md
@@ -0,0 +1,32 @@
+# RegexRule
+
+Defines a replacement rule based on matching a regular expression pattern.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**kind** | **str** | Discriminator field identifying the rule type as 'regex'. |
+**pattern** | **str** | The regular expression pattern to search for. Uses standard regex syntax (implementation specific, often PCRE-like). Remember to escape special characters if needed (e.g., `\\\\.` for a literal dot). |
+**replacement** | **str** | The replacement text. Can include backreferences to capture groups from the pattern, like `$1`, `$2`, etc. A literal `$` should be escaped (e.g., `$$`). |
+**flags** | **List[str]** | An array of flags to modify the regex behavior (e.g., 'i' for case-insensitivity). | [optional]
+
+## Example
+
+```python
+from speechall.models.regex_rule import RegexRule
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of RegexRule from a JSON string
+regex_rule_instance = RegexRule.from_json(json)
+# print the JSON string representation of the object
+print(regex_rule_instance.to_json())
+
+# convert the object into a dict
+regex_rule_dict = regex_rule_instance.to_dict()
+# create an instance of RegexRule from a dict
+regex_rule_from_dict = RegexRule.from_dict(regex_rule_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/RemoteTranscriptionConfiguration.md b/docs/RemoteTranscriptionConfiguration.md
new file mode 100644
index 0000000..fe5d3e0
--- /dev/null
+++ b/docs/RemoteTranscriptionConfiguration.md
@@ -0,0 +1,42 @@
+# RemoteTranscriptionConfiguration
+
+Configuration options for transcribing audio specified by a remote URL via the `/transcribe-remote` endpoint.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**model** | [**TranscriptionModelIdentifier**](TranscriptionModelIdentifier.md) | |
+**language** | [**TranscriptLanguageCode**](TranscriptLanguageCode.md) | | [optional]
+**output_format** | [**TranscriptOutputFormat**](TranscriptOutputFormat.md) | | [optional]
+**ruleset_id** | **str** | The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. | [optional]
+**punctuation** | **bool** | Whether to add punctuation. Support varies by model (e.g., Deepgram, AssemblyAI). Defaults to `true`. | [optional] [default to True]
+**timestamp_granularity** | **str** | Level of timestamp detail (`word` or `segment`). Defaults to `segment`. | [optional] [default to 'segment']
+**diarization** | **bool** | Enable speaker diarization. Defaults to `false`. | [optional] [default to False]
+**initial_prompt** | **str** | Optional text prompt to guide the transcription model. Support varies (e.g., OpenAI). | [optional]
+**temperature** | **float** | Controls output randomness for supported models (e.g., OpenAI). Value between 0 and 1. | [optional]
+**smart_format** | **bool** | Enable provider-specific smart formatting (e.g., Deepgram). Defaults vary. | [optional]
+**speakers_expected** | **int** | Hint for the number of expected speakers for diarization (e.g., RevAI, Deepgram). | [optional]
+**custom_vocabulary** | **List[str]** | List of custom words/phrases to improve recognition (e.g., Deepgram, AssemblyAI). | [optional]
+**file_url** | **str** | The publicly accessible URL of the audio file to transcribe. The API server must be able to fetch the audio from this URL. |
+**replacement_ruleset** | [**List[ReplacementRule]**](ReplacementRule.md) | An array of replacement rules to be applied directly to this transcription request, in order. This allows defining rules inline instead of (or in addition to) using a pre-saved `ruleset_id`. | [optional]
+
+## Example
+
+```python
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of RemoteTranscriptionConfiguration from a JSON string
+remote_transcription_configuration_instance = RemoteTranscriptionConfiguration.from_json(json)
+# print the JSON string representation of the object
+print(remote_transcription_configuration_instance.to_json())
+
+# convert the object into a dict
+remote_transcription_configuration_dict = remote_transcription_configuration_instance.to_dict()
+# create an instance of RemoteTranscriptionConfiguration from a dict
+remote_transcription_configuration_from_dict = RemoteTranscriptionConfiguration.from_dict(remote_transcription_configuration_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/ReplacementRule.md b/docs/ReplacementRule.md
new file mode 100644
index 0000000..ab802cd
--- /dev/null
+++ b/docs/ReplacementRule.md
@@ -0,0 +1,35 @@
+# ReplacementRule
+
+Defines a single rule for finding and replacing text in a transcription. Use one of the specific rule types (`ExactRule`, `RegexRule`, `RegexGroupRule`). The `kind` property acts as a discriminator.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**kind** | **str** | Discriminator field identifying the rule type (`exact`, `regex`, or `regex_group`). |
+**search** | **str** | The exact text string to search for within the transcription. |
+**replacement** | **str** | The replacement text. Can include backreferences to capture groups from the pattern, like `$1`, `$2`, etc. A literal `$` should be escaped (e.g., `$$`). |
+**case_sensitive** | **bool** | If true, the search will match only if the case is identical. If false (default), the search ignores case. | [optional] [default to False]
+**pattern** | **str** | The regular expression pattern containing capture groups `(...)`. The entire pattern must match for replacements to occur. |
+**flags** | **List[str]** | An array of flags to modify the regex behavior. | [optional]
+**group_replacements** | **Dict[str, str]** | An object where keys are capture group numbers (as strings, e.g., \"1\", \"2\") and values are the respective replacement strings for those groups. Groups not listed are kept as matched. The entire match is reconstructed using these replacements. |
+
+## Example
+
+```python
+from speechall.models.replacement_rule import ReplacementRule
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of ReplacementRule from a JSON string
+replacement_rule_instance = ReplacementRule.from_json(json)
+# print the JSON string representation of the object
+print ReplacementRule.to_json()
+
+# convert the object into a dict
+replacement_rule_dict = replacement_rule_instance.to_dict()
+# create an instance of ReplacementRule from a dict
+replacement_rule_from_dict = ReplacementRule.from_dict(replacement_rule_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/ReplacementRulesApi.md b/docs/ReplacementRulesApi.md
new file mode 100644
index 0000000..cfd047f
--- /dev/null
+++ b/docs/ReplacementRulesApi.md
@@ -0,0 +1,96 @@
+# speechall.ReplacementRulesApi
+
+All URIs are relative to *https://api.speechall.com/v1*
+
+Method | HTTP request | Description
+------------- | ------------- | -------------
+[**create_replacement_ruleset**](ReplacementRulesApi.md#create_replacement_ruleset) | **POST** /replacement-rulesets | Create a reusable set of text replacement rules.
+
+
+# **create_replacement_ruleset**
+> CreateReplacementRuleset201Response create_replacement_ruleset(create_replacement_ruleset_request)
+
+Create a reusable set of text replacement rules.
+
+Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`.
+Rules within a set are applied sequentially to the transcription text.
+
+
+### Example
+
+* Bearer (API Key) Authentication (bearerAuth):
+```python
+import time
+import os
+import speechall
+from speechall.models.create_replacement_ruleset201_response import CreateReplacementRuleset201Response
+from speechall.models.create_replacement_ruleset_request import CreateReplacementRulesetRequest
+from speechall.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to https://api.speechall.com/v1
+# See configuration.py for a list of all supported configuration parameters.
+configuration = speechall.Configuration(
+ host = "https://api.speechall.com/v1"
+)
+
+# The client must configure the authentication and authorization parameters
+# in accordance with the API server security policy.
+# Examples for each auth method are provided below, use the example that
+# satisfies your auth use case.
+
+# Configure Bearer authorization (API Key): bearerAuth
+configuration = speechall.Configuration(
+ access_token = os.environ["BEARER_TOKEN"]
+)
+
+# Enter a context with an instance of the API client
+with speechall.ApiClient(configuration) as api_client:
+ # Create an instance of the API class
+ api_instance = speechall.ReplacementRulesApi(api_client)
+    create_replacement_ruleset_request = {"name":"Acme Corp Corrections","rules":[{"kind":"exact","search":"speechal","replacement":"Speechall","caseSensitive":False},{"kind":"regex","pattern":"\\b(\\d{3})-(\\d{2})-(\\d{4})\\b","replacement":"[REDACTED SSN]","flags":["i"]}]} # CreateReplacementRulesetRequest | JSON object containing the name for the ruleset and an array of replacement rule objects.
+
+ try:
+ # Create a reusable set of text replacement rules.
+ api_response = api_instance.create_replacement_ruleset(create_replacement_ruleset_request)
+ print("The response of ReplacementRulesApi->create_replacement_ruleset:\n")
+ pprint(api_response)
+ except Exception as e:
+ print("Exception when calling ReplacementRulesApi->create_replacement_ruleset: %s\n" % e)
+```
+
+
+
+### Parameters
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **create_replacement_ruleset_request** | [**CreateReplacementRulesetRequest**](CreateReplacementRulesetRequest.md)| JSON object containing the name for the ruleset and an array of replacement rule objects. |
+
+### Return type
+
+[**CreateReplacementRuleset201Response**](CreateReplacementRuleset201Response.md)
+
+### Authorization
+
+[bearerAuth](../README.md#bearerAuth)
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json, text/plain
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**201** | Ruleset created successfully. The response body contains the unique ID assigned to the new ruleset. | - |
+**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - |
+**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - |
+**402** | Payment Required - There is no credit left on your account. | - |
+**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
|
+**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - |
+**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - |
+**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
diff --git a/docs/SpeechToTextApi.md b/docs/SpeechToTextApi.md
new file mode 100644
index 0000000..9cbb9e3
--- /dev/null
+++ b/docs/SpeechToTextApi.md
@@ -0,0 +1,300 @@
+# speechall.SpeechToTextApi
+
+All URIs are relative to *https://api.speechall.com/v1*
+
+Method | HTTP request | Description
+------------- | ------------- | -------------
+[**list_speech_to_text_models**](SpeechToTextApi.md#list_speech_to_text_models) | **GET** /speech-to-text-models | Retrieve a list of all available speech-to-text models.
+[**transcribe**](SpeechToTextApi.md#transcribe) | **POST** /transcribe | Upload an audio file directly and receive a transcription.
+[**transcribe_remote**](SpeechToTextApi.md#transcribe_remote) | **POST** /transcribe-remote | Transcribe an audio file located at a remote URL.
+
+
+# **list_speech_to_text_models**
+> List[SpeechToTextModel] list_speech_to_text_models()
+
+Retrieve a list of all available speech-to-text models.
+
+Returns a detailed list of all STT models accessible through the Speechall API.
+Each model entry includes its identifier (`provider.model`), display name, description,
+supported features (languages, formats, punctuation, diarization), and performance characteristics.
+Use this endpoint to discover available models and their capabilities before making transcription requests.
+
+
+### Example
+
+* Bearer (API Key) Authentication (bearerAuth):
+```python
+import time
+import os
+import speechall
+from speechall.models.speech_to_text_model import SpeechToTextModel
+from speechall.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to https://api.speechall.com/v1
+# See configuration.py for a list of all supported configuration parameters.
+configuration = speechall.Configuration(
+ host = "https://api.speechall.com/v1"
+)
+
+# The client must configure the authentication and authorization parameters
+# in accordance with the API server security policy.
+# Examples for each auth method are provided below, use the example that
+# satisfies your auth use case.
+
+# Configure Bearer authorization (API Key): bearerAuth
+configuration = speechall.Configuration(
+ access_token = os.environ["BEARER_TOKEN"]
+)
+
+# Enter a context with an instance of the API client
+with speechall.ApiClient(configuration) as api_client:
+ # Create an instance of the API class
+ api_instance = speechall.SpeechToTextApi(api_client)
+
+ try:
+ # Retrieve a list of all available speech-to-text models.
+ api_response = api_instance.list_speech_to_text_models()
+ print("The response of SpeechToTextApi->list_speech_to_text_models:\n")
+ pprint(api_response)
+ except Exception as e:
+ print("Exception when calling SpeechToTextApi->list_speech_to_text_models: %s\n" % e)
+```
+
+
+
+### Parameters
+This endpoint does not need any parameter.
+
+### Return type
+
+[**List[SpeechToTextModel]**](SpeechToTextModel.md)
+
+### Authorization
+
+[bearerAuth](../README.md#bearerAuth)
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json, text/plain
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A list of available speech-to-text models and their properties. | - |
+**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - |
+**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - |
+**402** | Payment Required - There is no credit left on your account. | - |
+**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn't exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - |
+**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
|
+**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - |
+**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - |
+**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+# **transcribe**
+> TranscriptionResponse transcribe(model, body, language=language, output_format=output_format, ruleset_id=ruleset_id, punctuation=punctuation, timestamp_granularity=timestamp_granularity, diarization=diarization, initial_prompt=initial_prompt, temperature=temperature, smart_format=smart_format, speakers_expected=speakers_expected, custom_vocabulary=custom_vocabulary)
+
+Upload an audio file directly and receive a transcription.
+
+This endpoint allows you to send raw audio data in the request body for transcription.
+You can specify the desired model, language, output format, and various provider-specific features using query parameters.
+Suitable for transcribing local audio files.
+
+
+### Example
+
+* Bearer (API Key) Authentication (bearerAuth):
+```python
+import time
+import os
+import speechall
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcription_response import TranscriptionResponse
+from speechall.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to https://api.speechall.com/v1
+# See configuration.py for a list of all supported configuration parameters.
+configuration = speechall.Configuration(
+ host = "https://api.speechall.com/v1"
+)
+
+# The client must configure the authentication and authorization parameters
+# in accordance with the API server security policy.
+# Examples for each auth method are provided below, use the example that
+# satisfies your auth use case.
+
+# Configure Bearer authorization (API Key): bearerAuth
+configuration = speechall.Configuration(
+ access_token = os.environ["BEARER_TOKEN"]
+)
+
+# Enter a context with an instance of the API client
+with speechall.ApiClient(configuration) as api_client:
+ # Create an instance of the API class
+ api_instance = speechall.SpeechToTextApi(api_client)
+ model = speechall.TranscriptionModelIdentifier() # TranscriptionModelIdentifier | The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models.
+ body = None # bytearray | The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration.
+ language = speechall.TranscriptLanguageCode() # TranscriptLanguageCode | The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency. (optional)
+ output_format = speechall.TranscriptOutputFormat() # TranscriptOutputFormat | The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`. (optional)
+ ruleset_id = 'ruleset_id_example' # str | The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint. (optional)
+ punctuation = True # bool | Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`. (optional) (default to True)
+ timestamp_granularity = 'segment' # str | Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`. (optional) (default to 'segment')
+ diarization = False # bool | Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments. (optional) (default to False)
+ initial_prompt = 'initial_prompt_example' # str | An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models). (optional)
+ temperature = 0.2 # float | Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model. (optional)
+ smart_format = True # bool | Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary. (optional)
+ speakers_expected = 56 # int | Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram). (optional)
+ custom_vocabulary = ['[\"Speechall\",\"Actondon\"]'] # List[str] | Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI). (optional)
+
+ try:
+ # Upload an audio file directly and receive a transcription.
+ api_response = api_instance.transcribe(model, body, language=language, output_format=output_format, ruleset_id=ruleset_id, punctuation=punctuation, timestamp_granularity=timestamp_granularity, diarization=diarization, initial_prompt=initial_prompt, temperature=temperature, smart_format=smart_format, speakers_expected=speakers_expected, custom_vocabulary=custom_vocabulary)
+ print("The response of SpeechToTextApi->transcribe:\n")
+ pprint(api_response)
+ except Exception as e:
+ print("Exception when calling SpeechToTextApi->transcribe: %s\n" % e)
+```
+
+
+
+### Parameters
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **model** | [**TranscriptionModelIdentifier**](TranscriptionModelIdentifier.md)| The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models. |
+ **body** | **bytearray**| The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration. |
+ **language** | [**TranscriptLanguageCode**](TranscriptLanguageCode.md)| The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency. | [optional]
+ **output_format** | [**TranscriptOutputFormat**](TranscriptOutputFormat.md)| The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`. | [optional]
+ **ruleset_id** | **str**| The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint. | [optional]
+ **punctuation** | **bool**| Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`. | [optional] [default to True]
+ **timestamp_granularity** | **str**| Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`. | [optional] [default to 'segment']
+ **diarization** | **bool**| Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments. | [optional] [default to False]
+ **initial_prompt** | **str**| An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models). | [optional]
+ **temperature** | **float**| Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model. | [optional]
+ **smart_format** | **bool**| Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary. | [optional]
+ **speakers_expected** | **int**| Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram). | [optional]
+ **custom_vocabulary** | [**List[str]**](str.md)| Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI). | [optional]
+
+### Return type
+
+[**TranscriptionResponse**](TranscriptionResponse.md)
+
+### Authorization
+
+[bearerAuth](../README.md#bearerAuth)
+
+### HTTP request headers
+
+ - **Content-Type**: audio/*
+ - **Accept**: application/json, text/plain
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | Successful transcription response. The content type and structure depend on the `output_format` parameter specified in the request. - `application/json`: Returned for `output_format=json` or `json_text`. See `TranscriptionResponse` schema (`TranscriptionDetailed` or `TranscriptionOnlyText`). - `text/plain`: Returned for `output_format=text`. | - |
+**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - |
+**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - |
+**402** | Payment Required - There is no credit left on your account. | - |
+**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn't exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - |
+**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
|
+**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - |
+**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - |
+**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+# **transcribe_remote**
+> TranscriptionResponse transcribe_remote(remote_transcription_configuration)
+
+Transcribe an audio file located at a remote URL.
+
+This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL.
+Provide the URL and transcription options within the JSON request body.
+Useful for transcribing files already stored online.
+
+
+### Example
+
+* Bearer (API Key) Authentication (bearerAuth):
+```python
+import time
+import os
+import speechall
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration
+from speechall.models.transcription_response import TranscriptionResponse
+from speechall.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to https://api.speechall.com/v1
+# See configuration.py for a list of all supported configuration parameters.
+configuration = speechall.Configuration(
+ host = "https://api.speechall.com/v1"
+)
+
+# The client must configure the authentication and authorization parameters
+# in accordance with the API server security policy.
+# Examples for each auth method are provided below, use the example that
+# satisfies your auth use case.
+
+# Configure Bearer authorization (API Key): bearerAuth
+configuration = speechall.Configuration(
+ access_token = os.environ["BEARER_TOKEN"]
+)
+
+# Enter a context with an instance of the API client
+with speechall.ApiClient(configuration) as api_client:
+ # Create an instance of the API class
+ api_instance = speechall.SpeechToTextApi(api_client)
+ remote_transcription_configuration = {"file_url":"https://example.com/path/to/audio.mp3","model":"openai.whisper-1","language":"en","output_format":"json","diarization":True} # RemoteTranscriptionConfiguration | JSON object containing the URL of the audio file and the desired transcription options.
+
+ try:
+ # Transcribe an audio file located at a remote URL.
+ api_response = api_instance.transcribe_remote(remote_transcription_configuration)
+ print("The response of SpeechToTextApi->transcribe_remote:\n")
+ pprint(api_response)
+ except Exception as e:
+ print("Exception when calling SpeechToTextApi->transcribe_remote: %s\n" % e)
+```
+
+
+
+### Parameters
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **remote_transcription_configuration** | [**RemoteTranscriptionConfiguration**](RemoteTranscriptionConfiguration.md)| JSON object containing the URL of the audio file and the desired transcription options. |
+
+### Return type
+
+[**TranscriptionResponse**](TranscriptionResponse.md)
+
+### Authorization
+
+[bearerAuth](../README.md#bearerAuth)
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json, text/plain
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | Successful transcription response. The content type and structure depend on the `output_format` parameter specified in the request. - `application/json`: Returned for `output_format=json` or `json_text`. See `TranscriptionResponse` schema (`TranscriptionDetailed` or `TranscriptionOnlyText`). - `text/plain`: Returned for `output_format=text`. | - |
+**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - |
+**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - |
+**402** | Payment Required - There is no credit left on your account. | - |
+**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn't exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - |
+**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
|
+**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - |
+**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - |
+**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
diff --git a/docs/SpeechToTextModel.md b/docs/SpeechToTextModel.md
new file mode 100644
index 0000000..4aa7e93
--- /dev/null
+++ b/docs/SpeechToTextModel.md
@@ -0,0 +1,57 @@
+# SpeechToTextModel
+
+Describes an available speech-to-text model, its provider, capabilities, and characteristics.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**id** | [**TranscriptionModelIdentifier**](TranscriptionModelIdentifier.md) | |
+**display_name** | **str** | A user-friendly name for the model. |
+**provider** | [**TranscriptionProvider**](TranscriptionProvider.md) | |
+**description** | **str** | A brief description of the model, its intended use case, or version notes. | [optional]
+**cost_per_second_usd** | **float** | The cost per second of audio processed in USD. | [optional]
+**is_available** | **bool** | Indicates whether the model is currently available for use. | [default to True]
+**supported_languages** | **List[str]** | A list of language codes (preferably BCP 47, e.g., \"en-US\", \"en-GB\", \"es-ES\") supported by this model. May include `auto` if automatic language detection is supported across multiple languages within a single audio file. | [optional]
+**punctuation** | **bool** | Indicates whether the model generally supports automatic punctuation insertion. | [optional]
+**diarization** | **bool** | Indicates whether the model generally supports speaker diarization (identifying different speakers). | [optional]
+**streamable** | **bool** | Indicates whether the model can be used for real-time streaming transcription via a WebSocket connection (if offered by Speechall). | [optional]
+**real_time_factor** | **float** | An approximate measure of processing speed for batch processing. Defined as (audio duration) / (processing time). A higher value means faster processing (e.g., RTF=2 means it processes 1 second of audio in 0.5 seconds). May not be available for all models or streaming scenarios. | [optional]
+**max_duration_seconds** | **float** | The maximum duration of a single audio file (in seconds) that the model can reliably process in one request. May vary by provider or plan. | [optional]
+**max_file_size_bytes** | **int** | The maximum size of a single audio file (in bytes) that can be uploaded for processing by this model. May vary by provider or plan. | [optional]
+**version** | **str** | The specific version identifier for the model. | [optional]
+**release_date** | **date** | The date when this specific version of the model was released or last updated. | [optional]
+**model_type** | **str** | The primary type or training domain of the model. Helps identify suitability for different audio types. | [optional]
+**accuracy_tier** | **str** | A general indication of the model's expected accuracy level relative to other models. Not a guaranteed metric. | [optional]
+**supported_audio_encodings** | **List[str]** | A list of audio encodings that this model supports or is optimized for (e.g., LINEAR16, FLAC, MP3, Opus). | [optional]
+**supported_sample_rates** | **List[int]** | A list of audio sample rates (in Hz) that this model supports or is optimized for. | [optional]
+**speaker_labels** | **bool** | Indicates whether the model can provide speaker labels for the transcription. | [optional]
+**word_timestamps** | **bool** | Indicates whether the model can provide timestamps for individual words. | [optional]
+**confidence_scores** | **bool** | Indicates whether the model provides confidence scores for the transcription or individual words. | [optional]
+**language_detection** | **bool** | Indicates whether the model supports automatic language detection for input audio. | [optional]
+**custom_vocabulary_support** | **bool** | Indicates if the model can leverage a custom vocabulary or language model adaptation. | [optional]
+**profanity_filtering** | **bool** | Indicates if the model supports filtering or masking of profanity. | [optional]
+**noise_reduction** | **bool** | Indicates if the model supports noise reduction. | [optional]
+**supports_srt** | **bool** | Indicates whether the model supports SRT subtitle format output. | [default to False]
+**supports_vtt** | **bool** | Indicates whether the model supports VTT subtitle format output. | [default to False]
+**voice_activity_detection** | **bool** | Indicates whether the model supports voice activity detection (VAD) to identify speech segments. | [optional]
+
+## Example
+
+```python
+from speechall.models.speech_to_text_model import SpeechToTextModel
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of SpeechToTextModel from a JSON string
+speech_to_text_model_instance = SpeechToTextModel.from_json(json)
+# print the JSON string representation of the object
+print(SpeechToTextModel.to_json())
+
+# convert the object into a dict
+speech_to_text_model_dict = speech_to_text_model_instance.to_dict()
+# create an instance of SpeechToTextModel from a dict
+speech_to_text_model_from_dict = SpeechToTextModel.from_dict(speech_to_text_model_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptLanguageCode.md b/docs/TranscriptLanguageCode.md
new file mode 100644
index 0000000..11c73ce
--- /dev/null
+++ b/docs/TranscriptLanguageCode.md
@@ -0,0 +1,215 @@
+# TranscriptLanguageCode
+
+The language code of the audio file, typically in ISO 639-1 format. Specifying the correct language improves transcription accuracy and speed. The special value `auto` can be used to request automatic language detection, if supported by the selected model. If omitted, the default language is English (`en`).
+
+## Enum
+
+* `AUTO` (value: `'auto'`)
+
+* `EN` (value: `'en'`)
+
+* `EN_AU` (value: `'en_au'`)
+
+* `EN_UK` (value: `'en_uk'`)
+
+* `EN_US` (value: `'en_us'`)
+
+* `AF` (value: `'af'`)
+
+* `AM` (value: `'am'`)
+
+* `AR` (value: `'ar'`)
+
+* `AS` (value: `'as'`)
+
+* `AZ` (value: `'az'`)
+
+* `BA` (value: `'ba'`)
+
+* `BE` (value: `'be'`)
+
+* `BG` (value: `'bg'`)
+
+* `BN` (value: `'bn'`)
+
+* `BO` (value: `'bo'`)
+
+* `BR` (value: `'br'`)
+
+* `BS` (value: `'bs'`)
+
+* `CA` (value: `'ca'`)
+
+* `CS` (value: `'cs'`)
+
+* `CY` (value: `'cy'`)
+
+* `DA` (value: `'da'`)
+
+* `DE` (value: `'de'`)
+
+* `EL` (value: `'el'`)
+
+* `ES` (value: `'es'`)
+
+* `ET` (value: `'et'`)
+
+* `EU` (value: `'eu'`)
+
+* `FA` (value: `'fa'`)
+
+* `FI` (value: `'fi'`)
+
+* `FO` (value: `'fo'`)
+
+* `FR` (value: `'fr'`)
+
+* `GL` (value: `'gl'`)
+
+* `GU` (value: `'gu'`)
+
+* `HA` (value: `'ha'`)
+
+* `HAW` (value: `'haw'`)
+
+* `HE` (value: `'he'`)
+
+* `HI` (value: `'hi'`)
+
+* `HR` (value: `'hr'`)
+
+* `HT` (value: `'ht'`)
+
+* `HU` (value: `'hu'`)
+
+* `HY` (value: `'hy'`)
+
+* `ID` (value: `'id'`)
+
+* `IS` (value: `'is'`)
+
+* `IT` (value: `'it'`)
+
+* `JA` (value: `'ja'`)
+
+* `JW` (value: `'jw'`)
+
+* `KA` (value: `'ka'`)
+
+* `KK` (value: `'kk'`)
+
+* `KM` (value: `'km'`)
+
+* `KN` (value: `'kn'`)
+
+* `KO` (value: `'ko'`)
+
+* `LA` (value: `'la'`)
+
+* `LB` (value: `'lb'`)
+
+* `LN` (value: `'ln'`)
+
+* `LO` (value: `'lo'`)
+
+* `LT` (value: `'lt'`)
+
+* `LV` (value: `'lv'`)
+
+* `MG` (value: `'mg'`)
+
+* `MI` (value: `'mi'`)
+
+* `MK` (value: `'mk'`)
+
+* `ML` (value: `'ml'`)
+
+* `MN` (value: `'mn'`)
+
+* `MR` (value: `'mr'`)
+
+* `MS` (value: `'ms'`)
+
+* `MT` (value: `'mt'`)
+
+* `MY` (value: `'my'`)
+
+* `NE` (value: `'ne'`)
+
+* `NL` (value: `'nl'`)
+
+* `NN` (value: `'nn'`)
+
+* `FALSE` (value: `'false'`) <!-- NOTE(review): this looks like the YAML "Norway problem" — the Norwegian language code `no` (alphabetically between `nn` and `oc`) was likely parsed as a boolean in the OpenAPI spec. Quote it as `"no"` in the spec and regenerate so this becomes `NO` (value: `'no'`). -->
+
+* `OC` (value: `'oc'`)
+
+* `PA` (value: `'pa'`)
+
+* `PL` (value: `'pl'`)
+
+* `PS` (value: `'ps'`)
+
+* `PT` (value: `'pt'`)
+
+* `RO` (value: `'ro'`)
+
+* `RU` (value: `'ru'`)
+
+* `SA` (value: `'sa'`)
+
+* `SD` (value: `'sd'`)
+
+* `SI` (value: `'si'`)
+
+* `SK` (value: `'sk'`)
+
+* `SL` (value: `'sl'`)
+
+* `SN` (value: `'sn'`)
+
+* `SO` (value: `'so'`)
+
+* `SQ` (value: `'sq'`)
+
+* `SR` (value: `'sr'`)
+
+* `SU` (value: `'su'`)
+
+* `SV` (value: `'sv'`)
+
+* `SW` (value: `'sw'`)
+
+* `TA` (value: `'ta'`)
+
+* `TE` (value: `'te'`)
+
+* `TG` (value: `'tg'`)
+
+* `TH` (value: `'th'`)
+
+* `TK` (value: `'tk'`)
+
+* `TL` (value: `'tl'`)
+
+* `TR` (value: `'tr'`)
+
+* `TT` (value: `'tt'`)
+
+* `UK` (value: `'uk'`)
+
+* `UR` (value: `'ur'`)
+
+* `UZ` (value: `'uz'`)
+
+* `VI` (value: `'vi'`)
+
+* `YI` (value: `'yi'`)
+
+* `YO` (value: `'yo'`)
+
+* `ZH` (value: `'zh'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptOutputFormat.md b/docs/TranscriptOutputFormat.md
new file mode 100644
index 0000000..01df029
--- /dev/null
+++ b/docs/TranscriptOutputFormat.md
@@ -0,0 +1,19 @@
+# TranscriptOutputFormat
+
+Specifies the desired format of the transcription output. - `text`: Plain text containing the full transcription. - `json_text`: A simple JSON object containing the transcription ID and the full text (`TranscriptionOnlyText` schema). - `json`: A detailed JSON object including segments, timestamps (based on `timestamp_granularity`), language, and potentially speaker labels and provider metadata (`TranscriptionDetailed` schema). - `srt`: SubRip subtitle format (returned as plain text). - `vtt`: WebVTT subtitle format (returned as plain text).
+
+## Enum
+
+* `TEXT` (value: `'text'`)
+
+* `JSON_TEXT` (value: `'json_text'`)
+
+* `JSON` (value: `'json'`)
+
+* `SRT` (value: `'srt'`)
+
+* `VTT` (value: `'vtt'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionDetailed.md b/docs/TranscriptionDetailed.md
new file mode 100644
index 0000000..2f246be
--- /dev/null
+++ b/docs/TranscriptionDetailed.md
@@ -0,0 +1,35 @@
+# TranscriptionDetailed
+
+A detailed JSON response format containing the full text, detected language, duration, individual timed segments, and potentially speaker labels and provider-specific metadata. Returned when `output_format` is `json`.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**id** | **str** | A unique identifier for the transcription job/request. |
+**text** | **str** | The full transcribed text as a single string. |
+**language** | **str** | The detected or specified language of the audio (ISO 639-1 code). | [optional]
+**duration** | **float** | The total duration of the processed audio file in seconds. **Deprecated**: This property may be removed in future versions as duration analysis might occur asynchronously. Rely on segment end times for duration information if needed. | [optional]
+**segments** | [**List[TranscriptionSegment]**](TranscriptionSegment.md) | An array of transcribed segments, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional]
+**words** | [**List[TranscriptionWord]**](TranscriptionWord.md) | An array of transcribed words, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional]
+**provider_metadata** | **Dict[str, object]** | An optional object containing additional metadata returned directly from the underlying STT provider. The structure of this object is provider-dependent. | [optional]
+
+## Example
+
+```python
+from speechall.models.transcription_detailed import TranscriptionDetailed
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TranscriptionDetailed from a JSON string
+transcription_detailed_instance = TranscriptionDetailed.from_json(json)
+# print the JSON string representation of the object
+print(transcription_detailed_instance.to_json())
+
+# convert the object into a dict
+transcription_detailed_dict = transcription_detailed_instance.to_dict()
+# create an instance of TranscriptionDetailed from a dict
+transcription_detailed_from_dict = TranscriptionDetailed.from_dict(transcription_detailed_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionModelIdentifier.md b/docs/TranscriptionModelIdentifier.md
new file mode 100644
index 0000000..526c2c2
--- /dev/null
+++ b/docs/TranscriptionModelIdentifier.md
@@ -0,0 +1,149 @@
+# TranscriptionModelIdentifier
+
+Unique identifier for a specific Speech-to-Text model, composed as `provider.model_name`. Used to select the engine for transcription.
+
+## Enum
+
+* `AMAZON_DOT_TRANSCRIBE` (value: `'amazon.transcribe'`)
+
+* `ASSEMBLYAI_DOT_BEST` (value: `'assemblyai.best'`)
+
+* `ASSEMBLYAI_DOT_NANO` (value: `'assemblyai.nano'`)
+
+* `ASSEMBLYAI_DOT_SLAM_MINUS_1` (value: `'assemblyai.slam-1'`)
+
+* `ASSEMBLYAI_DOT_UNIVERSAL` (value: `'assemblyai.universal'`)
+
+* `AZURE_DOT_STANDARD` (value: `'azure.standard'`)
+
+* `CLOUDFLARE_DOT_WHISPER` (value: `'cloudflare.whisper'`)
+
+* `CLOUDFLARE_DOT_WHISPER_MINUS_LARGE_MINUS_V3_MINUS_TURBO` (value: `'cloudflare.whisper-large-v3-turbo'`)
+
+* `CLOUDFLARE_DOT_WHISPER_MINUS_TINY_MINUS_EN` (value: `'cloudflare.whisper-tiny-en'`)
+
+* `DEEPGRAM_DOT_BASE` (value: `'deepgram.base'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_CONVERSATIONALAI` (value: `'deepgram.base-conversationalai'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_FINANCE` (value: `'deepgram.base-finance'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_GENERAL` (value: `'deepgram.base-general'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_MEETING` (value: `'deepgram.base-meeting'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_PHONECALL` (value: `'deepgram.base-phonecall'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_VIDEO` (value: `'deepgram.base-video'`)
+
+* `DEEPGRAM_DOT_BASE_MINUS_VOICEMAIL` (value: `'deepgram.base-voicemail'`)
+
+* `DEEPGRAM_DOT_ENHANCED` (value: `'deepgram.enhanced'`)
+
+* `DEEPGRAM_DOT_ENHANCED_MINUS_FINANCE` (value: `'deepgram.enhanced-finance'`)
+
+* `DEEPGRAM_DOT_ENHANCED_MINUS_GENERAL` (value: `'deepgram.enhanced-general'`)
+
+* `DEEPGRAM_DOT_ENHANCED_MINUS_MEETING` (value: `'deepgram.enhanced-meeting'`)
+
+* `DEEPGRAM_DOT_ENHANCED_MINUS_PHONECALL` (value: `'deepgram.enhanced-phonecall'`)
+
+* `DEEPGRAM_DOT_NOVA` (value: `'deepgram.nova'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_GENERAL` (value: `'deepgram.nova-general'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_PHONECALL` (value: `'deepgram.nova-phonecall'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2` (value: `'deepgram.nova-2'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_ATC` (value: `'deepgram.nova-2-atc'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_AUTOMOTIVE` (value: `'deepgram.nova-2-automotive'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_CONVERSATIONALAI` (value: `'deepgram.nova-2-conversationalai'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_DRIVETHRU` (value: `'deepgram.nova-2-drivethru'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_FINANCE` (value: `'deepgram.nova-2-finance'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_GENERAL` (value: `'deepgram.nova-2-general'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_MEDICAL` (value: `'deepgram.nova-2-medical'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_MEETING` (value: `'deepgram.nova-2-meeting'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_PHONECALL` (value: `'deepgram.nova-2-phonecall'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_VIDEO` (value: `'deepgram.nova-2-video'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_VOICEMAIL` (value: `'deepgram.nova-2-voicemail'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_3` (value: `'deepgram.nova-3'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_3_MINUS_GENERAL` (value: `'deepgram.nova-3-general'`)
+
+* `DEEPGRAM_DOT_NOVA_MINUS_3_MINUS_MEDICAL` (value: `'deepgram.nova-3-medical'`)
+
+* `DEEPGRAM_DOT_WHISPER` (value: `'deepgram.whisper'`)
+
+* `DEEPGRAM_DOT_WHISPER_MINUS_BASE` (value: `'deepgram.whisper-base'`)
+
+* `DEEPGRAM_DOT_WHISPER_MINUS_LARGE` (value: `'deepgram.whisper-large'`)
+
+* `DEEPGRAM_DOT_WHISPER_MINUS_MEDIUM` (value: `'deepgram.whisper-medium'`)
+
+* `DEEPGRAM_DOT_WHISPER_MINUS_SMALL` (value: `'deepgram.whisper-small'`)
+
+* `DEEPGRAM_DOT_WHISPER_MINUS_TINY` (value: `'deepgram.whisper-tiny'`)
+
+* `FALAI_DOT_ELEVENLABS_MINUS_SPEECH_MINUS_TO_MINUS_TEXT` (value: `'falai.elevenlabs-speech-to-text'`)
+
+* `FALAI_DOT_SPEECH_MINUS_TO_MINUS_TEXT` (value: `'falai.speech-to-text'`)
+
+* `FALAI_DOT_WHISPER` (value: `'falai.whisper'`)
+
+* `FALAI_DOT_WIZPER` (value: `'falai.wizper'`)
+
+* `FIREWORKSAI_DOT_WHISPER_MINUS_V3` (value: `'fireworksai.whisper-v3'`)
+
+* `FIREWORKSAI_DOT_WHISPER_MINUS_V3_MINUS_TURBO` (value: `'fireworksai.whisper-v3-turbo'`)
+
+* `GLADIA_DOT_STANDARD` (value: `'gladia.standard'`)
+
+* `GOOGLE_DOT_ENHANCED` (value: `'google.enhanced'`)
+
+* `GOOGLE_DOT_STANDARD` (value: `'google.standard'`)
+
+* `GEMINI_DOT_GEMINI_MINUS_2_DOT_5_MINUS_FLASH_MINUS_PREVIEW_MINUS_05_MINUS_20` (value: `'gemini.gemini-2.5-flash-preview-05-20'`)
+
+* `GEMINI_DOT_GEMINI_MINUS_2_DOT_5_MINUS_PRO_MINUS_PREVIEW_MINUS_06_MINUS_05` (value: `'gemini.gemini-2.5-pro-preview-06-05'`)
+
+* `GEMINI_DOT_GEMINI_MINUS_2_DOT_0_MINUS_FLASH` (value: `'gemini.gemini-2.0-flash'`)
+
+* `GEMINI_DOT_GEMINI_MINUS_2_DOT_0_MINUS_FLASH_MINUS_LITE` (value: `'gemini.gemini-2.0-flash-lite'`)
+
+* `GROQ_DOT_DISTIL_MINUS_WHISPER_MINUS_LARGE_MINUS_V3_MINUS_EN` (value: `'groq.distil-whisper-large-v3-en'`)
+
+* `GROQ_DOT_WHISPER_MINUS_LARGE_MINUS_V3` (value: `'groq.whisper-large-v3'`)
+
+* `GROQ_DOT_WHISPER_MINUS_LARGE_MINUS_V3_MINUS_TURBO` (value: `'groq.whisper-large-v3-turbo'`)
+
+* `IBM_DOT_STANDARD` (value: `'ibm.standard'`)
+
+* `OPENAI_DOT_WHISPER_MINUS_1` (value: `'openai.whisper-1'`)
+
+* `OPENAI_DOT_GPT_MINUS_4O_MINUS_TRANSCRIBE` (value: `'openai.gpt-4o-transcribe'`)
+
+* `OPENAI_DOT_GPT_MINUS_4O_MINUS_MINI_MINUS_TRANSCRIBE` (value: `'openai.gpt-4o-mini-transcribe'`)
+
+* `REVAI_DOT_MACHINE` (value: `'revai.machine'`)
+
+* `REVAI_DOT_FUSION` (value: `'revai.fusion'`)
+
+* `SPEECHMATICS_DOT_ENHANCED` (value: `'speechmatics.enhanced'`)
+
+* `SPEECHMATICS_DOT_STANDARD` (value: `'speechmatics.standard'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionOnlyText.md b/docs/TranscriptionOnlyText.md
new file mode 100644
index 0000000..b083851
--- /dev/null
+++ b/docs/TranscriptionOnlyText.md
@@ -0,0 +1,30 @@
+# TranscriptionOnlyText
+
+A simplified JSON response format containing only the transcription ID and the full transcribed text. Returned when `output_format` is `json_text`.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**id** | **str** | A unique identifier for the transcription job/request. |
+**text** | **str** | The full transcribed text as a single string. |
+
+## Example
+
+```python
+from speechall.models.transcription_only_text import TranscriptionOnlyText
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TranscriptionOnlyText from a JSON string
+transcription_only_text_instance = TranscriptionOnlyText.from_json(json)
+# print the JSON string representation of the object
+print(transcription_only_text_instance.to_json())
+
+# convert the object into a dict
+transcription_only_text_dict = transcription_only_text_instance.to_dict()
+# create an instance of TranscriptionOnlyText from a dict
+transcription_only_text_from_dict = TranscriptionOnlyText.from_dict(transcription_only_text_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionProvider.md b/docs/TranscriptionProvider.md
new file mode 100644
index 0000000..8de5869
--- /dev/null
+++ b/docs/TranscriptionProvider.md
@@ -0,0 +1,39 @@
+# TranscriptionProvider
+
+The identifier for the underlying Speech-to-Text service provider (e.g., 'openai', 'deepgram').
+
+## Enum
+
+* `AMAZON` (value: `'amazon'`)
+
+* `ASSEMBLYAI` (value: `'assemblyai'`)
+
+* `AZURE` (value: `'azure'`)
+
+* `CLOUDFLARE` (value: `'cloudflare'`)
+
+* `DEEPGRAM` (value: `'deepgram'`)
+
+* `FALAI` (value: `'falai'`)
+
+* `FIREWORKSAI` (value: `'fireworksai'`)
+
+* `GEMINI` (value: `'gemini'`)
+
+* `GLADIA` (value: `'gladia'`)
+
+* `GOOGLE` (value: `'google'`)
+
+* `GROQ` (value: `'groq'`)
+
+* `IBM` (value: `'ibm'`)
+
+* `OPENAI` (value: `'openai'`)
+
+* `REVAI` (value: `'revai'`)
+
+* `SPEECHMATICS` (value: `'speechmatics'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionResponse.md b/docs/TranscriptionResponse.md
new file mode 100644
index 0000000..765ab86
--- /dev/null
+++ b/docs/TranscriptionResponse.md
@@ -0,0 +1,35 @@
+# TranscriptionResponse
+
+Represents the JSON structure returned when a JSON-based `output_format` (`json` or `json_text`) is requested. It can be either a detailed structure or a simple text-only structure.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**id** | **str** | A unique identifier for the transcription job/request. |
+**text** | **str** | The full transcribed text as a single string. |
+**language** | **str** | The detected or specified language of the audio (ISO 639-1 code). | [optional]
+**duration** | **float** | The total duration of the processed audio file in seconds. **Deprecated**: This property may be removed in future versions as duration analysis might occur asynchronously. Rely on segment end times for duration information if needed. | [optional]
+**segments** | [**List[TranscriptionSegment]**](TranscriptionSegment.md) | An array of transcribed segments, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional]
+**words** | [**List[TranscriptionWord]**](TranscriptionWord.md) | An array of transcribed words, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional]
+**provider_metadata** | **Dict[str, object]** | An optional object containing additional metadata returned directly from the underlying STT provider. The structure of this object is provider-dependent. | [optional]
+
+## Example
+
+```python
+from speechall.models.transcription_response import TranscriptionResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TranscriptionResponse from a JSON string
+transcription_response_instance = TranscriptionResponse.from_json(json)
+# print the JSON string representation of the object
+print(transcription_response_instance.to_json())
+
+# convert the object into a dict
+transcription_response_dict = transcription_response_instance.to_dict()
+# create an instance of TranscriptionResponse from a dict
+transcription_response_from_dict = TranscriptionResponse.from_dict(transcription_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionSegment.md b/docs/TranscriptionSegment.md
new file mode 100644
index 0000000..e513856
--- /dev/null
+++ b/docs/TranscriptionSegment.md
@@ -0,0 +1,33 @@
+# TranscriptionSegment
+
+Represents a time-coded segment of the transcription, typically corresponding to a phrase, sentence, or speaker turn.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**start** | **float** | The start time of the segment in seconds from the beginning of the audio. | [optional]
+**end** | **float** | The end time of the segment in seconds from the beginning of the audio. | [optional]
+**text** | **str** | The transcribed text content of this segment. | [optional]
+**speaker** | **str** | An identifier for the speaker of this segment, present if diarization was enabled and successful. | [optional]
+**confidence** | **float** | The model's confidence score for the transcription of this segment, typically between 0 and 1 (if provided by the model). | [optional]
+
+## Example
+
+```python
+from speechall.models.transcription_segment import TranscriptionSegment
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TranscriptionSegment from a JSON string
+transcription_segment_instance = TranscriptionSegment.from_json(json)
+# print the JSON string representation of the object
+print(transcription_segment_instance.to_json())
+
+# convert the object into a dict
+transcription_segment_dict = transcription_segment_instance.to_dict()
+# create an instance of TranscriptionSegment from a dict
+transcription_segment_from_dict = TranscriptionSegment.from_dict(transcription_segment_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/docs/TranscriptionWord.md b/docs/TranscriptionWord.md
new file mode 100644
index 0000000..1e8bf58
--- /dev/null
+++ b/docs/TranscriptionWord.md
@@ -0,0 +1,33 @@
+# TranscriptionWord
+
+Represents a word in the transcription, providing time-coded chunks of the transcription.
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**start** | **float** | The start time of the word in seconds from the beginning of the audio. |
+**end** | **float** | The end time of the word in seconds from the beginning of the audio. |
+**word** | **str** | The transcribed word. |
+**speaker** | **str** | An identifier for the speaker of this word, present if diarization was enabled and successful. | [optional]
+**confidence** | **float** | The model's confidence score for the transcription of this word, typically between 0 and 1 (if provided by the model). | [optional]
+
+## Example
+
+```python
+from speechall.models.transcription_word import TranscriptionWord
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TranscriptionWord from a JSON string
+transcription_word_instance = TranscriptionWord.from_json(json)
+# print the JSON string representation of the object
+print(transcription_word_instance.to_json())
+
+# convert the object into a dict
+transcription_word_dict = transcription_word_instance.to_dict()
+# create an instance of TranscriptionWord from a dict
+transcription_word_from_dict = TranscriptionWord.from_dict(transcription_word_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/example_transcribe.py b/example_transcribe.py
new file mode 100755
index 0000000..df4bd28
--- /dev/null
+++ b/example_transcribe.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+"""
+Example script demonstrating how to use the Speechall API transcribe endpoint.
+
+This script shows how to:
+1. Set up the API client with authentication
+2. Upload and transcribe an audio file
+3. Use different models and options
+4. Handle responses in different formats
+
+Requirements:
+- Set SPEECHALL_API_TOKEN environment variable with your API token
+- Have an audio file to transcribe (or use the remote URL example)
+"""
+
+import os
+import sys
+from pathlib import Path
+import json
+
+from speechall import ApiClient, Configuration
+from speechall.api.speech_to_text_api import SpeechToTextApi
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration
+from speechall.exceptions import ApiException
+
+
+def setup_client():
+ """Set up the API client with authentication."""
+ # Get API token from environment variable
+ api_token = os.getenv('SPEECHALL_API_TOKEN')
+ if not api_token:
+ print("Error: Please set SPEECHALL_API_TOKEN environment variable")
+ print("Export your API token like: export SPEECHALL_API_TOKEN='your-token-here'")
+ sys.exit(1)
+
+ # Configure the API client
+ configuration = Configuration()
+ configuration.access_token = api_token
+ configuration.host = "https://api.speechall.com/v1" # Default host
+
+ # Create API client
+ api_client = ApiClient(configuration)
+ return SpeechToTextApi(api_client)
+
+
+def list_available_models(api_instance):
+ """List all available speech-to-text models."""
+ try:
+ print("๐ Available Speech-to-Text Models:")
+ print("=" * 50)
+
+ models = api_instance.list_speech_to_text_models()
+
+ for model in models[:3]: # Show first 3 models
+ print(f"๐ค {model.id}")
+ print(f" Name: {model.display_name}")
+ print(f" Provider: {model.provider}")
+ if hasattr(model, 'description') and model.description:
+ print(f" Description: {model.description}")
+ print()
+
+ if len(models) > 3:
+ print(f"... and {len(models) - 3} more models available")
+
+ except ApiException as e:
+ print(f"โ Error listing models: {e}")
+
+
+def transcribe_local_file(api_instance, file_path, model_id="openai.whisper-1", language="en"):
+ """Transcribe a local audio file."""
+ try:
+ print(f"๐ค Transcribing local file: {file_path}")
+ print(f" Model: {model_id}")
+ print(f" Language: {language}")
+ print("-" * 50)
+
+ # Check if file exists
+ if not Path(file_path).exists():
+ print(f"โ File not found: {file_path}")
+ return
+
+ # Read audio file
+ with open(file_path, 'rb') as audio_file:
+ audio_data = audio_file.read()
+
+ # Make transcription request
+ result = api_instance.transcribe(
+ model=TranscriptionModelIdentifier(model_id),
+ body=audio_data,
+ language=TranscriptLanguageCode(language),
+ output_format=TranscriptOutputFormat.JSON,
+ punctuation=True
+ )
+
+        print("✅ Transcription completed!")
+
+ # Access the text directly
+ transcribed_text = result.actual_instance.text
+ print(f"๐ Transcribed Text:\n{transcribed_text}")
+
+ # Also show the full result structure
+ # print(f"\n๐ Full Result:\n{json.dumps(result.to_dict(), indent=2, default=str)}")
+
+ except FileNotFoundError:
+ print(f"โ File not found: {file_path}")
+ except ApiException as e:
+ print(f"โ API Error: {e}")
+ except Exception as e:
+ print(f"โ Unexpected error: {e}")
+
+
+def transcribe_remote_url(api_instance, audio_url, model_id="openai.whisper-1"):
+ """Transcribe an audio file from a remote URL."""
+ try:
+ print(f"๐ Transcribing remote URL: {audio_url}")
+ print(f" Model: {model_id}")
+ print("-" * 50)
+
+ # Create remote transcription configuration
+ config = RemoteTranscriptionConfiguration(
+ url=audio_url,
+ model=TranscriptionModelIdentifier(model_id),
+ language=TranscriptLanguageCode.EN,
+ output_format=TranscriptOutputFormat.JSON,
+ punctuation=True,
+ timestamp_granularity="word" # Get word-level timestamps
+ )
+
+ # Make transcription request
+ result = api_instance.transcribe_remote(config)
+
+        print("✅ Transcription completed!")
+ print(f"๐ Result: {result}")
+
+ except ApiException as e:
+ print(f"โ API Error: {e}")
+ except Exception as e:
+ print(f"โ Unexpected error: {e}")
+
+
+def transcribe_with_advanced_features(api_instance, file_path):
+ """Demonstrate advanced transcription features."""
+ try:
+ print(f"๐ Advanced transcription with features:")
+ print(f" File: {file_path}")
+ print(f" Features: Diarization, Custom vocabulary, Smart formatting")
+ print("-" * 50)
+
+ if not Path(file_path).exists():
+ print(f"โ File not found: {file_path}")
+ return
+
+ with open(file_path, 'rb') as audio_file:
+ audio_data = audio_file.read()
+
+        # Use AssemblyAI model which supports advanced features
+ result = api_instance.transcribe(
+ model=TranscriptionModelIdentifier.ASSEMBLYAI_DOT_BEST,
+ body=audio_data,
+ language=TranscriptLanguageCode.EN,
+ output_format=TranscriptOutputFormat.JSON,
+ punctuation=True,
+ timestamp_granularity="word",
+ diarization=True, # Speaker identification
+ smart_format=True, # Smart formatting for numbers, dates, etc.
+ custom_vocabulary=["Speechall", "API", "Python", "SDK"], # Custom words
+ speakers_expected=2, # Hint about number of speakers
+ )
+
+        print("✅ Advanced transcription completed!")
+ print(f"๐ Result:\n{json.dumps(result.to_dict(), indent=2, default=str)}")
+
+ except ApiException as e:
+ print(f"โ API Error: {e}")
+ except Exception as e:
+ print(f"โ Unexpected error: {e}")
+
+
+def main():
+ """Main function demonstrating different transcription scenarios."""
+ print("๐๏ธ Speechall Python SDK - Transcription Examples")
+ print("=" * 60)
+
+ # Set up API client
+ api_instance = setup_client()
+
+ # Example 1: List available models
+ # list_available_models(api_instance)
+
+ # Example 2: Transcribe a local file (you'll need to provide your own audio file)
+ local_audio_file = os.path.expanduser("~/Downloads/how-dictop-works.mp3") # Replace with your audio file path
+ if Path(local_audio_file).exists():
+ transcribe_local_file(api_instance, local_audio_file)
+ else:
+ print(f"โน๏ธ Skipping local file example - {local_audio_file} not found")
+
+ # Example 3: Transcribe from remote URL (example URL - replace with real audio URL)
+ sample_audio_url = "https://example.com/sample-audio.wav"
+ print(f"โน๏ธ Remote URL example (replace with real audio URL): {sample_audio_url}")
+ # Uncomment the following line to test with a real audio URL:
+ # transcribe_remote_url(api_instance, sample_audio_url)
+
+ # Example 4: Advanced features (if you have a local audio file)
+ # if Path(local_audio_file).exists():
+ # transcribe_with_advanced_features(api_instance, local_audio_file)
+
+ print("\nโจ Examples completed!")
+ print("\n๐ Next steps:")
+ print("1. Set your SPEECHALL_API_TOKEN environment variable")
+    print("2. Replace the sample audio file path with your actual audio file path")
+ print("3. Customize the models and parameters for your use case")
+ print("4. Check the API documentation for more advanced features")
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/fix_transcription_response.py b/fix_transcription_response.py
new file mode 100755
index 0000000..f39d1a1
--- /dev/null
+++ b/fix_transcription_response.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python3
+"""
+Automatic fix for TranscriptionResponse oneOf issue.
+
+This script automatically patches the generated TranscriptionResponse class
+to handle the case where TranscriptionDetailed is a superset of TranscriptionOnlyText.
+
+This should be run after OpenAPI code generation.
+"""
+
+import os
+import sys
+import re
+
+def apply_transcription_response_fix():
+ """Apply the fix to TranscriptionResponse."""
+
+ target_file = "speechall/models/transcription_response.py"
+
+ if not os.path.exists(target_file):
+ print(f"โ File not found: {target_file}")
+ return False
+
+ print(f"๐ง Applying TranscriptionResponse oneOf fix to {target_file}")
+
+ # Read the current file
+ with open(target_file, 'r') as f:
+ content = f.read()
+
+ # Check if the fix is already applied
+ if "# Parse JSON once to avoid multiple parsing" in content:
+        print("✅ Fix already applied - skipping")
+ return True
+
+ # Replace the from_json method
+ old_from_json = r''' @classmethod
+ def from_json\(cls, json_str: str\) -> TranscriptionResponse:
+ """Returns the object represented by the json string"""
+ instance = TranscriptionResponse\.construct\(\)
+ error_messages = \[\]
+ match = 0
+
+ # deserialize data into TranscriptionDetailed
+ try:
+ instance\.actual_instance = TranscriptionDetailed\.from_json\(json_str\)
+ match \+= 1
+ except \(ValidationError, ValueError\) as e:
+ error_messages\.append\(str\(e\)\)
+ # deserialize data into TranscriptionOnlyText
+ try:
+ instance\.actual_instance = TranscriptionOnlyText\.from_json\(json_str\)
+ match \+= 1
+ except \(ValidationError, ValueError\) as e:
+ error_messages\.append\(str\(e\)\)
+
+ if match > 1:
+ # more than 1 match
+ raise ValueError\("Multiple matches found when deserializing the JSON string into TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText\. Details: " \+ ", "\.join\(error_messages\)\)
+ elif match == 0:
+ # no match
+ raise ValueError\("No match found when deserializing the JSON string into TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText\. Details: " \+ ", "\.join\(error_messages\)\)
+ else:
+ return instance'''
+
+ new_from_json = ''' @classmethod
+ def from_json(cls, json_str: str) -> TranscriptionResponse:
+ """Returns the object represented by the json string"""
+ instance = TranscriptionResponse.construct()
+ error_messages = []
+
+ # Parse JSON once to avoid multiple parsing
+ try:
+ json_obj = json.loads(json_str)
+ except json.JSONDecodeError as e:
+ raise ValueError(f"Invalid JSON: {str(e)}")
+
+ # Try TranscriptionDetailed first - if it has extra fields beyond id/text, prefer it
+ # Check if the JSON contains fields that are specific to TranscriptionDetailed
+ has_detailed_fields = any(key in json_obj for key in ['language', 'duration', 'segments', 'words', 'provider_metadata'])
+
+ if has_detailed_fields:
+ # Definitely should be TranscriptionDetailed
+ try:
+ instance.actual_instance = TranscriptionDetailed.from_json(json_str)
+ return instance
+ except (ValidationError, ValueError) as e:
+ error_messages.append(f"TranscriptionDetailed validation failed: {str(e)}")
+
+ # Try TranscriptionDetailed first (even without extra fields, it might still be the correct type)
+ try:
+ instance.actual_instance = TranscriptionDetailed.from_json(json_str)
+ return instance
+ except (ValidationError, ValueError) as e:
+ error_messages.append(f"TranscriptionDetailed validation failed: {str(e)}")
+
+ # Fall back to TranscriptionOnlyText
+ try:
+ instance.actual_instance = TranscriptionOnlyText.from_json(json_str)
+ return instance
+ except (ValidationError, ValueError) as e:
+ error_messages.append(f"TranscriptionOnlyText validation failed: {str(e)}")
+
+ # If we get here, neither worked
+ raise ValueError("No match found when deserializing the JSON string into TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText. Details: " + ", ".join(error_messages))'''
+
+ # Replace the validator method
+ old_validator = r''' @validator\('actual_instance'\)
+ def actual_instance_must_validate_oneof\(cls, v\):
+ instance = TranscriptionResponse\.construct\(\)
+ error_messages = \[\]
+ match = 0
+ # validate data type: TranscriptionDetailed
+ if not isinstance\(v, TranscriptionDetailed\):
+ error_messages\.append\(f"Error! Input type `\{type\(v\)\}` is not `TranscriptionDetailed`"\)
+ else:
+ match \+= 1
+ # validate data type: TranscriptionOnlyText
+ if not isinstance\(v, TranscriptionOnlyText\):
+ error_messages\.append\(f"Error! Input type `\{type\(v\)\}` is not `TranscriptionOnlyText`"\)
+ else:
+ match \+= 1
+ if match > 1:
+ # more than 1 match
+ raise ValueError\("Multiple matches found when setting `actual_instance` in TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText\. Details: " \+ ", "\.join\(error_messages\)\)
+ elif match == 0:
+ # no match
+ raise ValueError\("No match found when setting `actual_instance` in TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText\. Details: " \+ ", "\.join\(error_messages\)\)
+ else:
+ return v'''
+
+ new_validator = ''' @validator('actual_instance')
+ def actual_instance_must_validate_oneof(cls, v):
+ # Check if it's a valid type for either schema
+ if isinstance(v, (TranscriptionDetailed, TranscriptionOnlyText)):
+ return v
+
+ # If not an instance of either expected type, raise error
+ error_messages = [
+ f"Error! Input type `{type(v)}` is not `TranscriptionDetailed`",
+ f"Error! Input type `{type(v)}` is not `TranscriptionOnlyText`"
+ ]
+ raise ValueError("No match found when setting `actual_instance` in TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText. Details: " + ", ".join(error_messages))'''
+
+ # Apply the replacements
+ try:
+ # Replace from_json method
+ content = re.sub(old_from_json, new_from_json, content, flags=re.DOTALL)
+
+ # Replace validator method
+ content = re.sub(old_validator, new_validator, content, flags=re.DOTALL)
+
+ # Write the fixed content back
+ with open(target_file, 'w') as f:
+ f.write(content)
+
+        print("✅ TranscriptionResponse fix applied successfully!")
+ return True
+
+ except Exception as e:
+ print(f"โ Error applying fix: {e}")
+ return False
+
+def apply_release_date_fix():
+ """Apply the fix to SpeechToTextModel release_date field."""
+
+ target_file = "speechall/models/speech_to_text_model.py"
+
+ if not os.path.exists(target_file):
+ print(f"โ File not found: {target_file}")
+ return False
+
+ print(f"๐ง Applying release_date fix to {target_file}")
+
+ # Read the current file
+ with open(target_file, 'r') as f:
+ content = f.read()
+
+ # Check if the fix is already applied
+ if "from datetime import date, datetime" in content and "Added this to fix the release_date field" in content:
+        print("✅ Fix already applied - skipping")
+ return True
+
+ old_content = '''from datetime import date'''
+ new_content = '''from datetime import date, datetime'''
+
+ # Replace the old content with the new content
+ content = content.replace(old_content, new_content)
+
+ old_content = ''' @validator('accuracy_tier')
+ def accuracy_tier_validate_enum(cls, value):
+ """Validates the enum"""
+ if value is None:
+ return value
+
+ if value not in ('basic', 'standard', 'enhanced', 'premium',):
+ raise ValueError("must be one of enum values ('basic', 'standard', 'enhanced', 'premium')")
+ return value'''
+
+ new_content = ''' @validator('accuracy_tier')
+ def accuracy_tier_validate_enum(cls, value):
+ """Validates the enum"""
+ if value is None:
+ return value
+
+ if value not in ('basic', 'standard', 'enhanced', 'premium',):
+ raise ValueError("must be one of enum values ('basic', 'standard', 'enhanced', 'premium')")
+ return value
+
+ # Added this to fix the release_date field
+ @validator('release_date', pre=True)
+ def parse_release_date(cls, value):
+ """Parse release_date from various string formats"""
+ if value is None or isinstance(value, date):
+ return value
+
+ if isinstance(value, str):
+ # Try common date formats
+ date_formats = [
+ '%Y-%m-%d', # ISO format: 2023-12-25
+ '%m/%d/%Y', # US format: 12/25/2023
+ '%d/%m/%Y', # European format: 25/12/2023
+ '%Y-%m-%dT%H:%M:%S', # ISO datetime format
+ '%Y-%m-%dT%H:%M:%SZ',# ISO datetime with Z
+ '%Y-%m-%d %H:%M:%S', # Space separated datetime
+ ]
+
+ for fmt in date_formats:
+ try:
+ parsed_datetime = datetime.strptime(value, fmt)
+ return parsed_datetime.date()
+ except ValueError:
+ continue
+
+ # If no format works, try to return None to avoid errors
+ return None
+
+ return value'''
+
+ # Replace the old content with the new content
+ content = content.replace(old_content, new_content)
+
+ # Write the fixed content back
+ with open(target_file, 'w') as f:
+ f.write(content)
+
+    print("✅ Release date fix applied successfully!")
+ return True
+
+def main():
+ """Main function."""
+ if apply_transcription_response_fix():
+ print("๐ Automatic fix completed successfully!")
+ else:
+ print("โ Fix failed!")
+ sys.exit(1)
+
+ if apply_release_date_fix():
+ print("๐ Automatic fix completed successfully!")
+ else:
+ print("โ Fix failed!")
+ sys.exit(1)
+
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/git_push.sh b/git_push.sh
new file mode 100644
index 0000000..f53a75d
--- /dev/null
+++ b/git_push.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+# ref: https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/
+#
+# Usage example: /bin/sh ./git_push.sh wing328 openapi-petstore-perl "minor update" "gitlab.com"
+
+git_user_id=$1
+git_repo_id=$2
+release_note=$3
+git_host=$4
+
+if [ "$git_host" = "" ]; then
+ git_host="github.com"
+ echo "[INFO] No command line input provided. Set \$git_host to $git_host"
+fi
+
+if [ "$git_user_id" = "" ]; then
+ git_user_id="GIT_USER_ID"
+ echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id"
+fi
+
+if [ "$git_repo_id" = "" ]; then
+ git_repo_id="GIT_REPO_ID"
+ echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id"
+fi
+
+if [ "$release_note" = "" ]; then
+ release_note="Minor update"
+ echo "[INFO] No command line input provided. Set \$release_note to $release_note"
+fi
+
+# Initialize the local directory as a Git repository
+git init
+
+# Adds the files in the local repository and stages them for commit.
+git add .
+
+# Commits the tracked changes and prepares them to be pushed to a remote repository.
+git commit -m "$release_note"
+
+# Sets the new remote
+git_remote=$(git remote)
+if [ "$git_remote" = "" ]; then # git remote not defined
+
+ if [ "$GIT_TOKEN" = "" ]; then
+ echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment."
+ git remote add origin https://${git_host}/${git_user_id}/${git_repo_id}.git
+ else
+ git remote add origin https://${git_user_id}:"${GIT_TOKEN}"@${git_host}/${git_user_id}/${git_repo_id}.git
+ fi
+
+fi
+
+git pull origin master
+
+# Pushes (Forces) the changes in the local repository up to the remote repository
+echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git"
+git push origin master 2>&1 | grep -v 'To https'
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..66871ad
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,51 @@
+[project]
+name = "speechall"
+version = "0.1.0"
+description = "Speechall API Python SDK"
+authors = [
+ { name = "Speechall", email = "info@actondon.com" }
+]
+license = "MIT"
+readme = "README.md"
+requires-python = ">=3.8"
+classifiers = [
+ "Typing :: Typed",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Operating System :: OS Independent",
+ "Operating System :: POSIX",
+ "Operating System :: MacOS",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: Microsoft :: Windows",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "License :: OSI Approved :: MIT License"
+]
+keywords = ["Speechall", "Speech-to-Text", "API", "ASR", "Transcription"]
+
+dependencies = [
+ "urllib3>=1.25.3",
+ "python-dateutil>=2.8.2",
+ "pydantic>=1.10.5,<2",
+ "aenum>=3.1.11",
+]
+
+[project.optional-dependencies]
+dev = [
+ "pytest>=7.2.1",
+ "tox>=3.9.0",
+ "flake8>=4.0.0",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["speechall"]
+
+[tool.pylint.'MESSAGES CONTROL']
+extension-pkg-whitelist = "pydantic"
diff --git a/regenerate.sh b/regenerate.sh
new file mode 100755
index 0000000..a9b0d41
--- /dev/null
+++ b/regenerate.sh
@@ -0,0 +1,223 @@
+#!/bin/bash
+
+# OpenAPI Client Regeneration Script
+# This script regenerates the OpenAPI client code while preserving custom files
+
+set -e # Exit on any error
+
+# Configuration
+OPENAPI_SPEC_PATH="../speechall-openapi/openapi.yaml"
+GENERATOR="python-pydantic-v1"
+OUTPUT_DIR="."
+TEMP_OUTPUT_DIR="./temp_generated_client"
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+echo -e "${BLUE}🔄 Speechall OpenAPI Client Regeneration${NC}"
+echo "=============================================="
+
+# Check if OpenAPI spec exists
+if [ ! -f "$OPENAPI_SPEC_PATH" ]; then
+ echo -e "${RED}❌ Error: OpenAPI spec not found at $OPENAPI_SPEC_PATH${NC}"
+ echo "Please ensure the speechall-openapi repository is cloned at ../speechall-openapi/"
+ exit 1
+fi
+
+# Check if openapi-generator is available
+if ! command -v openapi-generator &> /dev/null; then
+ echo -e "${RED}❌ Error: openapi-generator command not found${NC}"
+ echo "Please install it with: npm install @openapitools/openapi-generator-cli -g"
+ echo "Or use: brew install openapi-generator"
+ exit 1
+fi
+
+# Show current status
+echo -e "${YELLOW}📋 Current status:${NC}"
+echo " OpenAPI Spec: $OPENAPI_SPEC_PATH"
+echo " Generator: $GENERATOR"
+echo " Output Directory: $OUTPUT_DIR"
+echo ""
+
+# Backup custom files (just in case)
+echo -e "${YELLOW}💾 Creating backup of custom files...${NC}"
+BACKUP_DIR="backup_$(date +%Y%m%d_%H%M%S)"
+mkdir -p "$BACKUP_DIR"
+
+# Read .openapi-generator-ignore and backup all files listed there
+if [ -f ".openapi-generator-ignore" ]; then
+ echo " 📋 Reading .openapi-generator-ignore file..."
+ while IFS= read -r line; do
+ # Skip empty lines and comments
+ if [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]]; then
+ continue
+ fi
+
+ # Handle different types of patterns
+ if [[ "$line" == *"/**" ]]; then
+ # Handle directory patterns like .venv/**, __pycache__/**
+ dir_pattern="${line%/**}"
+ if [ -d "$dir_pattern" ]; then
+ mkdir -p "$BACKUP_DIR/$(dirname "$dir_pattern")" 2>/dev/null
+ cp -r "$dir_pattern" "$BACKUP_DIR/$(dirname "$dir_pattern")/" 2>/dev/null
+ echo " ✅ Backed up directory $dir_pattern"
+ fi
+ elif [[ "$line" != *"*"* ]]; then
+ # Handle simple files (with or without directory paths)
+ if [ -f "$line" ]; then
+ # Create directory structure if needed
+ mkdir -p "$BACKUP_DIR/$(dirname "$line")" 2>/dev/null
+ cp "$line" "$BACKUP_DIR/$line"
+ echo " ✅ Backed up $line"
+ fi
+ fi
+ done < .openapi-generator-ignore
+else
+ echo " ⚠️ .openapi-generator-ignore file not found, using fallback list"
+ # Fallback to original hardcoded list
+ for file in example_transcribe.py simple_example.py EXAMPLE_README.md pyproject.toml; do
+ if [ -f "$file" ]; then
+ cp "$file" "$BACKUP_DIR/"
+ echo " ✅ Backed up $file"
+ fi
+ done
+fi
+
+# Regenerate the client
+echo ""
+echo -e "${BLUE}🔧 Regenerating OpenAPI client into temporary directory...${NC}"
+# Create or clean the temporary directory
+rm -rf "$TEMP_OUTPUT_DIR"
+mkdir -p "$TEMP_OUTPUT_DIR"
+
+openapi-generator generate \
+ -i "$OPENAPI_SPEC_PATH" \
+ -g "$GENERATOR" \
+ -o "$TEMP_OUTPUT_DIR" \
+ --package-name speechall \
+ --skip-validate-spec \
+ --additional-properties="packageVersion=0.1.0"
+
+if [ $? -eq 0 ]; then
+ echo -e "${GREEN}✅ Client regeneration into temporary directory completed successfully!${NC}"
+else
+ echo -e "${RED}❌ Client regeneration failed!${NC}"
+ rm -rf "$TEMP_OUTPUT_DIR" # Clean up temp dir on failure
+ exit 1
+fi
+
+echo -e "${BLUE}🔄 Syncing generated files to output directory...${NC}"
+# Remove old generated directories and files from the primary output directory
+# Be careful here not to delete essential non-generated files.
+# Common generated items: speechall/, openapi_client/, docs/, test/, tests/, README.md, setup.py, .openapi-generator-ignore, tox.ini (sometimes)
+# The custom files backed up are: example_transcribe.py, simple_example.py, EXAMPLE_README.md, pyproject.toml
+# So, it should be safe to remove these:
+rm -rf \
+ "$OUTPUT_DIR/speechall" \
+ "$OUTPUT_DIR/openapi_client" \
+ "$OUTPUT_DIR/docs" \
+ "$OUTPUT_DIR/test" \
+ "$OUTPUT_DIR/tests" \
+ "$OUTPUT_DIR/README.md" \
+ "$OUTPUT_DIR/setup.py" \
+ "$OUTPUT_DIR/.openapi-generator-ignore" \
+ "$OUTPUT_DIR/tox.ini" \
+ "$OUTPUT_DIR/git_push.sh" \
+ "$OUTPUT_DIR/requirements.txt" # This was deleted in a previous step, but good to include
+
+# Using rsync to copy, which is generally robust. -a preserves attributes.
+# Ensure trailing slash on source for rsync to copy contents.
+# Removed --delete as rm -rf above should handle major cleaning.
+rsync -av "$TEMP_OUTPUT_DIR/" "$OUTPUT_DIR/"
+
+echo -e "${GREEN}✅ Sync complete.${NC}"
+
+# Restore all backed up files
+# This must happen AFTER rsync, as rsync would have overwritten these files
+echo -e "${YELLOW}🔧 Restoring backed up files...${NC}"
+if [ -d "$BACKUP_DIR" ]; then
+ # Restore all files from backup directory
+ find "$BACKUP_DIR" -type f | while read -r backup_file; do
+ # Get relative path by removing backup directory prefix
+ relative_path="${backup_file#"$BACKUP_DIR/"}"
+
+ # Skip if this is a directory backup
+ if [ -f "$backup_file" ]; then
+ # Create directory if it doesn't exist
+ mkdir -p "$(dirname "$relative_path")"
+ cp "$backup_file" "$relative_path"
+ echo " ✅ Restored $relative_path"
+ fi
+ done
+
+ # Also restore directories
+ find "$BACKUP_DIR" -type d -mindepth 1 | while read -r backup_dir; do
+ relative_path="${backup_dir#"$BACKUP_DIR/"}"
+ if [ -d "$backup_dir" ] && [ ! -d "$relative_path" ]; then
+ cp -r "$backup_dir" "$relative_path"
+ echo " ✅ Restored directory $relative_path"
+ fi
+ done
+else
+ echo " ⚠️ No backup directory found"
+fi
+
+# Fix hardcoded author information in setup.py
+echo -e "${YELLOW}🔧 Fixing author information in setup.py...${NC}"
+if [ -f "$OUTPUT_DIR/setup.py" ]; then
+ sed -i '' 's/author="Speechall Support"/author="Speechall"/' "$OUTPUT_DIR/setup.py"
+ sed -i '' 's/author_email="team@openapitools.org"/author_email="info@actondon.com"/' "$OUTPUT_DIR/setup.py"
+ echo " ✅ Author information updated in setup.py"
+fi
+
+# Clean up temporary directory
+echo -e "${YELLOW}🧹 Cleaning up temporary directory...${NC}"
+rm -rf "$TEMP_OUTPUT_DIR"
+echo " ✅ Temporary directory cleaned up."
+
+# Apply automatic fixes for known issues
+echo ""
+echo -e "${BLUE}🔧 Applying automatic fixes...${NC}"
+if [ -f "fix_transcription_response.py" ]; then
+ python3 fix_transcription_response.py
+ if [ $? -eq 0 ]; then
+ echo " ✅ TranscriptionResponse oneOf fix applied"
+ else
+ echo -e "${YELLOW} ⚠️ TranscriptionResponse fix failed - you may need to apply it manually${NC}"
+ fi
+else
+ echo -e "${YELLOW} ⚠️ fix_transcription_response.py not found - skipping automatic fix${NC}"
+fi
+
+# Reinstall dependencies
+echo ""
+echo -e "${BLUE}📦 Updating dependencies...${NC}"
+if command -v uv &> /dev/null; then
+ uv sync
+ echo -e "${GREEN}✅ Dependencies updated with uv${NC}"
+else
+ # pip install -r requirements.txt # requirements.txt is no longer used
+ pip install . # Install from pyproject.toml / setup.py
+ echo -e "${GREEN}✅ Dependencies updated with pip (from pyproject.toml)${NC}"
+fi
+
+# Clean up old backup if successful
+echo ""
+echo -e "${YELLOW}🧹 Cleaning up...${NC}"
+if [ -d "$BACKUP_DIR" ]; then
+ echo "Backup created at: $BACKUP_DIR"
+ echo "You can safely delete it if everything looks good: rm -rf $BACKUP_DIR"
+fi
+
+echo ""
+echo -e "${GREEN}🎉 Regeneration complete!${NC}"
+echo ""
+echo -e "${BLUE}📋 Next steps:${NC}"
+echo "1. Test your examples: uv run python example_transcribe.py"
+echo "2. Check for any new models or features in the updated client"
+echo "3. Update your code if there are breaking changes"
+echo "4. Delete the backup folder once you've verified everything works"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0a8bf96
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+python_dateutil >= 2.5.3
+setuptools >= 21.0.0
+urllib3 >= 1.25.3, < 3.0.0
+pydantic >= 1.10.5, < 2
+aenum >= 3.1.11
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..11433ee
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length=99
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..6d5ef31
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,50 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from setuptools import setup, find_packages # noqa: H301
+
+# To install the library, run the following
+#
+# python setup.py install
+#
+# prerequisite: setuptools
+# http://pypi.python.org/pypi/setuptools
+NAME = "speechall"
+VERSION = "0.1.0"
+PYTHON_REQUIRES = ">=3.7"
+REQUIRES = [
+ "urllib3 >= 1.25.3, < 3.0.0",
+ "python-dateutil",
+ "pydantic >= 1.10.5, < 2",
+ "aenum"
+]
+
+setup(
+ name=NAME,
+ version=VERSION,
+ description="Speechall API",
+ author="Speechall",
+ author_email="info@actondon.com",
+ url="",
+ keywords=["OpenAPI", "OpenAPI-Generator", "Speechall API"],
+ install_requires=REQUIRES,
+ packages=find_packages(exclude=["test", "tests"]),
+ include_package_data=True,
+ license="MIT",
+ long_description_content_type='text/markdown',
+ long_description="""\
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+ """, # noqa: E501
+ package_data={"speechall": ["py.typed"]},
+)
diff --git a/simple_example.py b/simple_example.py
new file mode 100755
index 0000000..a229708
--- /dev/null
+++ b/simple_example.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+"""
+Simple example of using the Speechall API to transcribe audio.
+
+Set your API token: export SPEECHALL_API_TOKEN="your-token-here"
+"""
+
+import os
+from speechall import ApiClient, Configuration
+from speechall.api.speech_to_text_api import SpeechToTextApi
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+
+# Set up the API client
+configuration = Configuration()
+configuration.access_token = os.getenv('SPEECHALL_API_TOKEN')
+configuration.host = "https://api.speechall.com/v1"
+
+api_client = ApiClient(configuration)
+api_instance = SpeechToTextApi(api_client)
+
+# Example: List available models
+try:
+ print("Available models:")
+ models = api_instance.list_speech_to_text_models()
+ for model in models[:5]: # Show first 5
+ print(f"- {model.model_id}: {model.display_name}")
+except Exception as e:
+ print(f"Error listing models: {e}")
+
+# Example: Transcribe audio file
+audio_file_path = "your_audio_file.wav" # Replace with your audio file
+
+if os.path.exists(audio_file_path):
+ try:
+ with open(audio_file_path, 'rb') as f:
+ result = api_instance.transcribe(
+ model=TranscriptionModelIdentifier.OPENAI_DOT_WHISPER_MINUS_1,
+ body=f.read(),
+ language=TranscriptLanguageCode.EN
+ )
+ print(f"Transcription: {result}")
+ except Exception as e:
+ print(f"Error transcribing: {e}")
+else:
+ print(f"Audio file {audio_file_path} not found. Please provide a valid audio file.")
\ No newline at end of file
diff --git a/speechall/__init__.py b/speechall/__init__.py
new file mode 100644
index 0000000..ad88649
--- /dev/null
+++ b/speechall/__init__.py
@@ -0,0 +1,56 @@
+# coding: utf-8
+
+# flake8: noqa
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+__version__ = "0.1.0"
+
+# import apis into sdk package
+from speechall.api.replacement_rules_api import ReplacementRulesApi
+from speechall.api.speech_to_text_api import SpeechToTextApi
+
+# import ApiClient
+from speechall.api_response import ApiResponse
+from speechall.api_client import ApiClient
+from speechall.configuration import Configuration
+from speechall.exceptions import OpenApiException
+from speechall.exceptions import ApiTypeError
+from speechall.exceptions import ApiValueError
+from speechall.exceptions import ApiKeyError
+from speechall.exceptions import ApiAttributeError
+from speechall.exceptions import ApiException
+
+# import models into sdk package
+from speechall.models.base_transcription_configuration import BaseTranscriptionConfiguration
+from speechall.models.create_replacement_ruleset201_response import CreateReplacementRuleset201Response
+from speechall.models.create_replacement_ruleset_request import CreateReplacementRulesetRequest
+from speechall.models.error_response import ErrorResponse
+from speechall.models.exact_rule import ExactRule
+from speechall.models.open_ai_create_translation_request_model import OpenAICreateTranslationRequestModel
+from speechall.models.openai_compatible_create_transcription200_response import OpenaiCompatibleCreateTranscription200Response
+from speechall.models.openai_compatible_create_translation200_response import OpenaiCompatibleCreateTranslation200Response
+from speechall.models.regex_group_rule import RegexGroupRule
+from speechall.models.regex_rule import RegexRule
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration
+from speechall.models.replacement_rule import ReplacementRule
+from speechall.models.speech_to_text_model import SpeechToTextModel
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.transcription_detailed import TranscriptionDetailed
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcription_only_text import TranscriptionOnlyText
+from speechall.models.transcription_provider import TranscriptionProvider
+from speechall.models.transcription_response import TranscriptionResponse
+from speechall.models.transcription_segment import TranscriptionSegment
+from speechall.models.transcription_word import TranscriptionWord
diff --git a/speechall/api/__init__.py b/speechall/api/__init__.py
new file mode 100644
index 0000000..eebe30d
--- /dev/null
+++ b/speechall/api/__init__.py
@@ -0,0 +1,6 @@
+# flake8: noqa
+
+# import apis into api package
+from speechall.api.replacement_rules_api import ReplacementRulesApi
+from speechall.api.speech_to_text_api import SpeechToTextApi
+
diff --git a/speechall/api/replacement_rules_api.py b/speechall/api/replacement_rules_api.py
new file mode 100644
index 0000000..8fce2dc
--- /dev/null
+++ b/speechall/api/replacement_rules_api.py
@@ -0,0 +1,199 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import re # noqa: F401
+import io
+import warnings
+
+from pydantic import validate_arguments, ValidationError
+
+from typing_extensions import Annotated
+from pydantic import Field
+
+from speechall.models.create_replacement_ruleset201_response import CreateReplacementRuleset201Response
+from speechall.models.create_replacement_ruleset_request import CreateReplacementRulesetRequest
+
+from speechall.api_client import ApiClient
+from speechall.api_response import ApiResponse
+from speechall.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class ReplacementRulesApi:
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None) -> None:
+ if api_client is None:
+ api_client = ApiClient.get_default()
+ self.api_client = api_client
+
+ @validate_arguments
+ def create_replacement_ruleset(self, create_replacement_ruleset_request : Annotated[CreateReplacementRulesetRequest, Field(..., description="JSON object containing the name for the ruleset and an array of replacement rule objects.")], **kwargs) -> CreateReplacementRuleset201Response: # noqa: E501
+ """Create a reusable set of text replacement rules. # noqa: E501
+
+ Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text. # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+
+ >>> thread = api.create_replacement_ruleset(create_replacement_ruleset_request, async_req=True)
+ >>> result = thread.get()
+
+ :param create_replacement_ruleset_request: JSON object containing the name for the ruleset and an array of replacement rule objects. (required)
+ :type create_replacement_ruleset_request: CreateReplacementRulesetRequest
+ :param async_req: Whether to execute the request asynchronously.
+ :type async_req: bool, optional
+ :param _request_timeout: timeout setting for this request.
+ If one number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: Returns the result object.
+ If the method is called asynchronously,
+ returns the request thread.
+ :rtype: CreateReplacementRuleset201Response
+ """
+ kwargs['_return_http_data_only'] = True
+ if '_preload_content' in kwargs:
+ message = "Error! Please call the create_replacement_ruleset_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501
+ raise ValueError(message)
+ return self.create_replacement_ruleset_with_http_info(create_replacement_ruleset_request, **kwargs) # noqa: E501
+
+ @validate_arguments
+ def create_replacement_ruleset_with_http_info(self, create_replacement_ruleset_request : Annotated[CreateReplacementRulesetRequest, Field(..., description="JSON object containing the name for the ruleset and an array of replacement rule objects.")], **kwargs) -> ApiResponse: # noqa: E501
+ """Create a reusable set of text replacement rules. # noqa: E501
+
+ Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text. # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+
+ >>> thread = api.create_replacement_ruleset_with_http_info(create_replacement_ruleset_request, async_req=True)
+ >>> result = thread.get()
+
+ :param create_replacement_ruleset_request: JSON object containing the name for the ruleset and an array of replacement rule objects. (required)
+ :type create_replacement_ruleset_request: CreateReplacementRulesetRequest
+ :param async_req: Whether to execute the request asynchronously.
+ :type async_req: bool, optional
+ :param _preload_content: if False, the ApiResponse.data will
+ be set to none and raw_data will store the
+ HTTP response body without reading/decoding.
+ Default is True.
+ :type _preload_content: bool, optional
+ :param _return_http_data_only: response data instead of ApiResponse
+ object with status code, headers, etc
+ :type _return_http_data_only: bool, optional
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :param _request_auth: set to override the auth_settings for an a single
+ request; this effectively ignores the authentication
+ in the spec for a single request.
+ :type _request_auth: dict, optional
+ :type _content_type: string, optional: force content-type for the request
+ :return: Returns the result object.
+ If the method is called asynchronously,
+ returns the request thread.
+ :rtype: tuple(CreateReplacementRuleset201Response, status_code(int), headers(HTTPHeaderDict))
+ """
+
+ _params = locals()
+
+ _all_params = [
+ 'create_replacement_ruleset_request'
+ ]
+ _all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout',
+ '_request_auth',
+ '_content_type',
+ '_headers'
+ ]
+ )
+
+ # validate the arguments
+ for _key, _val in _params['kwargs'].items():
+ if _key not in _all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_replacement_ruleset" % _key
+ )
+ _params[_key] = _val
+ del _params['kwargs']
+
+ _collection_formats = {}
+
+ # process the path parameters
+ _path_params = {}
+
+ # process the query parameters
+ _query_params = []
+ # process the header parameters
+ _header_params = dict(_params.get('_headers', {}))
+ # process the form parameters
+ _form_params = []
+ _files = {}
+ # process the body parameter
+ _body_params = None
+ if _params['create_replacement_ruleset_request'] is not None:
+ _body_params = _params['create_replacement_ruleset_request']
+
+ # set the HTTP header `Accept`
+ _header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'text/plain']) # noqa: E501
+
+ # set the HTTP header `Content-Type`
+ _content_types_list = _params.get('_content_type',
+ self.api_client.select_header_content_type(
+ ['application/json']))
+ if _content_types_list:
+ _header_params['Content-Type'] = _content_types_list
+
+ # authentication setting
+ _auth_settings = ['bearerAuth'] # noqa: E501
+
+ _response_types_map = {
+ '201': "CreateReplacementRuleset201Response",
+ '400': "ErrorResponse",
+ '401': "ErrorResponse",
+ '402': "ErrorResponse",
+ '429': "ErrorResponse",
+ '500': "ErrorResponse",
+ '503': "ErrorResponse",
+ '504': "ErrorResponse",
+ }
+
+ return self.api_client.call_api(
+ '/replacement-rulesets', 'POST',
+ _path_params,
+ _query_params,
+ _header_params,
+ body=_body_params,
+ post_params=_form_params,
+ files=_files,
+ response_types_map=_response_types_map,
+ auth_settings=_auth_settings,
+ async_req=_params.get('async_req'),
+ _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=_params.get('_preload_content', True),
+ _request_timeout=_params.get('_request_timeout'),
+ collection_formats=_collection_formats,
+ _request_auth=_params.get('_request_auth'))
diff --git a/speechall/api/speech_to_text_api.py b/speechall/api/speech_to_text_api.py
new file mode 100644
index 0000000..de9aebf
--- /dev/null
+++ b/speechall/api/speech_to_text_api.py
@@ -0,0 +1,603 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import re # noqa: F401
+import io
+import warnings
+
+from pydantic import validate_arguments, ValidationError
+
+from typing_extensions import Annotated
+from pydantic import Field, StrictBool, StrictBytes, StrictStr, confloat, conint, conlist
+
+from typing import List, Optional, Union
+
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration
+from speechall.models.speech_to_text_model import SpeechToTextModel
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcription_response import TranscriptionResponse
+
+from speechall.api_client import ApiClient
+from speechall.api_response import ApiResponse
+from speechall.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class SpeechToTextApi:
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None) -> None:
+ if api_client is None:
+ api_client = ApiClient.get_default()
+ self.api_client = api_client
+
+ @validate_arguments
+ def list_speech_to_text_models(self, **kwargs) -> List[SpeechToTextModel]: # noqa: E501
+ """Retrieve a list of all available speech-to-text models. # noqa: E501
+
+ Returns a detailed list of all STT models accessible through the Speechall API. Each model entry includes its identifier (`provider.model`), display name, description, supported features (languages, formats, punctuation, diarization), and performance characteristics. Use this endpoint to discover available models and their capabilities before making transcription requests. # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+
+ >>> thread = api.list_speech_to_text_models(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req: Whether to execute the request asynchronously.
+ :type async_req: bool, optional
+ :param _request_timeout: timeout setting for this request.
+ If one number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: Returns the result object.
+ If the method is called asynchronously,
+ returns the request thread.
+ :rtype: List[SpeechToTextModel]
+ """
+ kwargs['_return_http_data_only'] = True
+ if '_preload_content' in kwargs:
+ message = "Error! Please call the list_speech_to_text_models_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501
+ raise ValueError(message)
+ return self.list_speech_to_text_models_with_http_info(**kwargs) # noqa: E501
+
+    @validate_arguments
+    def list_speech_to_text_models_with_http_info(self, **kwargs) -> ApiResponse:  # noqa: E501
+        """Retrieve a list of all available speech-to-text models. # noqa: E501
+
+        Returns a detailed list of all STT models accessible through the Speechall API. Each model entry includes its identifier (`provider.model`), display name, description, supported features (languages, formats, punctuation, diarization), and performance characteristics. Use this endpoint to discover available models and their capabilities before making transcription requests. # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+
+        >>> thread = api.list_speech_to_text_models_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req: Whether to execute the request asynchronously.
+        :type async_req: bool, optional
+        :param _preload_content: if False, the ApiResponse.data will
+                                 be set to none and raw_data will store the
+                                 HTTP response body without reading/decoding.
+                                 Default is True.
+        :type _preload_content: bool, optional
+        :param _return_http_data_only: response data instead of ApiResponse
+                                       object with status code, headers, etc
+        :type _return_http_data_only: bool, optional
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :param _request_auth: set to override the auth_settings for a single
+                              request; this effectively ignores the authentication
+                              in the spec for a single request.
+        :type _request_auth: dict, optional
+        :type _content_type: string, optional: force content-type for the request
+        :raises ApiTypeError: if an unexpected keyword argument is supplied.
+        :return: Returns the result object.
+                 If the method is called asynchronously,
+                 returns the request thread.
+        :rtype: tuple(List[SpeechToTextModel], status_code(int), headers(HTTPHeaderDict))
+        """
+
+        # locals() here captures just `self` and `kwargs`; recognized kwargs are
+        # merged into this dict below so the request builder can read options by name.
+        _params = locals()
+
+        # This endpoint has no operation-specific parameters; only the generic
+        # request options below are accepted.
+        _all_params = [
+        ]
+        _all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout',
+                '_request_auth',
+                '_content_type',
+                '_headers'
+            ]
+        )
+
+        # validate the arguments
+        for _key, _val in _params['kwargs'].items():
+            if _key not in _all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_speech_to_text_models" % _key
+                )
+            _params[_key] = _val
+        del _params['kwargs']
+
+        _collection_formats = {}
+
+        # process the path parameters
+        _path_params = {}
+
+        # process the query parameters
+        _query_params = []
+        # process the header parameters
+        _header_params = dict(_params.get('_headers', {}))
+        # process the form parameters
+        _form_params = []
+        _files = {}
+        # process the body parameter (GET request: no body is sent)
+        _body_params = None
+        # set the HTTP header `Accept`
+        _header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'text/plain'])  # noqa: E501
+
+        # authentication setting
+        _auth_settings = ['bearerAuth']  # noqa: E501
+
+        # Map HTTP status codes to the model used to deserialize each response body.
+        _response_types_map = {
+            '200': "List[SpeechToTextModel]",
+            '400': "ErrorResponse",
+            '401': "ErrorResponse",
+            '402': "ErrorResponse",
+            '404': "ErrorResponse",
+            '429': "ErrorResponse",
+            '500': "ErrorResponse",
+            '503': "ErrorResponse",
+            '504': "ErrorResponse",
+        }
+
+        return self.api_client.call_api(
+            '/speech-to-text-models', 'GET',
+            _path_params,
+            _query_params,
+            _header_params,
+            body=_body_params,
+            post_params=_form_params,
+            files=_files,
+            response_types_map=_response_types_map,
+            auth_settings=_auth_settings,
+            async_req=_params.get('async_req'),
+            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=_params.get('_preload_content', True),
+            _request_timeout=_params.get('_request_timeout'),
+            collection_formats=_collection_formats,
+            _request_auth=_params.get('_request_auth'))
+
+ @validate_arguments
+ def transcribe(self, model : Annotated[TranscriptionModelIdentifier, Field(..., description="The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models.")], body : Annotated[Union[StrictBytes, StrictStr], Field(..., description="The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration.")], language : Annotated[Optional[TranscriptLanguageCode], Field(description="The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency.")] = None, output_format : Annotated[Optional[TranscriptOutputFormat], Field(description="The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.")] = None, ruleset_id : Annotated[Optional[StrictStr], Field(description="The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint.")] = None, punctuation : Annotated[Optional[StrictBool], Field(description="Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`.")] = None, timestamp_granularity : Annotated[Optional[StrictStr], Field(description="Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). 
Defaults to `segment`.")] = None, diarization : Annotated[Optional[StrictBool], Field(description="Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.")] = None, initial_prompt : Annotated[Optional[StrictStr], Field(description="An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).")] = None, temperature : Annotated[Optional[Union[confloat(le=1, ge=0, strict=True), conint(le=1.0, ge=0.0, strict=True)]], Field(description="Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.")] = None, smart_format : Annotated[Optional[StrictBool], Field(description="Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.")] = None, speakers_expected : Annotated[Optional[conint(strict=True, le=10, ge=1)], Field(description="Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).")] = None, custom_vocabulary : Annotated[Optional[conlist(StrictStr)], Field(description="Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).")] = None, **kwargs) -> TranscriptionResponse: # noqa: E501
+ """Upload an audio file directly and receive a transcription. # noqa: E501
+
+ This endpoint allows you to send raw audio data in the request body for transcription. You can specify the desired model, language, output format, and various provider-specific features using query parameters. Suitable for transcribing local audio files. # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+
+ >>> thread = api.transcribe(model, body, language, output_format, ruleset_id, punctuation, timestamp_granularity, diarization, initial_prompt, temperature, smart_format, speakers_expected, custom_vocabulary, async_req=True)
+ >>> result = thread.get()
+
+ :param model: The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models. (required)
+ :type model: TranscriptionModelIdentifier
+ :param body: The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration. (required)
+ :type body: bytearray
+ :param language: The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency.
+ :type language: TranscriptLanguageCode
+ :param output_format: The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.
+ :type output_format: TranscriptOutputFormat
+ :param ruleset_id: The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint.
+ :type ruleset_id: str
+ :param punctuation: Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`.
+ :type punctuation: bool
+ :param timestamp_granularity: Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`.
+ :type timestamp_granularity: str
+ :param diarization: Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.
+ :type diarization: bool
+ :param initial_prompt: An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).
+ :type initial_prompt: str
+ :param temperature: Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.
+ :type temperature: float
+ :param smart_format: Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.
+ :type smart_format: bool
+ :param speakers_expected: Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).
+ :type speakers_expected: int
+ :param custom_vocabulary: Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).
+ :type custom_vocabulary: List[str]
+ :param async_req: Whether to execute the request asynchronously.
+ :type async_req: bool, optional
+ :param _request_timeout: timeout setting for this request.
+ If one number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: Returns the result object.
+ If the method is called asynchronously,
+ returns the request thread.
+ :rtype: TranscriptionResponse
+ """
+ kwargs['_return_http_data_only'] = True
+ if '_preload_content' in kwargs:
+ message = "Error! Please call the transcribe_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501
+ raise ValueError(message)
+ return self.transcribe_with_http_info(model, body, language, output_format, ruleset_id, punctuation, timestamp_granularity, diarization, initial_prompt, temperature, smart_format, speakers_expected, custom_vocabulary, **kwargs) # noqa: E501
+
+    @validate_arguments
+    def transcribe_with_http_info(self, model : Annotated[TranscriptionModelIdentifier, Field(..., description="The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models.")], body : Annotated[Union[StrictBytes, StrictStr], Field(..., description="The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration.")], language : Annotated[Optional[TranscriptLanguageCode], Field(description="The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency.")] = None, output_format : Annotated[Optional[TranscriptOutputFormat], Field(description="The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.")] = None, ruleset_id : Annotated[Optional[StrictStr], Field(description="The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint.")] = None, punctuation : Annotated[Optional[StrictBool], Field(description="Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`.")] = None, timestamp_granularity : Annotated[Optional[StrictStr], Field(description="Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). 
`segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`.")] = None, diarization : Annotated[Optional[StrictBool], Field(description="Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.")] = None, initial_prompt : Annotated[Optional[StrictStr], Field(description="An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).")] = None, temperature : Annotated[Optional[Union[confloat(le=1, ge=0, strict=True), conint(le=1.0, ge=0.0, strict=True)]], Field(description="Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.")] = None, smart_format : Annotated[Optional[StrictBool], Field(description="Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.")] = None, speakers_expected : Annotated[Optional[conint(strict=True, le=10, ge=1)], Field(description="Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).")] = None, custom_vocabulary : Annotated[Optional[conlist(StrictStr)], Field(description="Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).")] = None, **kwargs) -> ApiResponse: # noqa: E501
+        """Upload an audio file directly and receive a transcription. # noqa: E501
+
+        This endpoint allows you to send raw audio data in the request body for transcription. You can specify the desired model, language, output format, and various provider-specific features using query parameters. Suitable for transcribing local audio files. # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+
+        >>> thread = api.transcribe_with_http_info(model, body, language, output_format, ruleset_id, punctuation, timestamp_granularity, diarization, initial_prompt, temperature, smart_format, speakers_expected, custom_vocabulary, async_req=True)
+        >>> result = thread.get()
+
+        :param model: The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models. (required)
+        :type model: TranscriptionModelIdentifier
+        :param body: The audio file to transcribe. Send the raw audio data as the request body, or a local file name (str) whose bytes will be read and uploaded. (required)
+        :type body: bytearray
+        :param language: The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided.
+        :type language: TranscriptLanguageCode
+        :param output_format: The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.
+        :type output_format: TranscriptOutputFormat
+        :param ruleset_id: The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text.
+        :type ruleset_id: str
+        :param punctuation: Enable automatic punctuation in the transcription. Support varies by model/provider. Defaults to `true`.
+        :type punctuation: bool
+        :param timestamp_granularity: Level of timestamp detail (`segment` or `word`) when `output_format` is `json` or `verbose_json`. Defaults to `segment`.
+        :type timestamp_granularity: str
+        :param diarization: Enable speaker diarization. Support and quality vary by model/provider. Defaults to `false`.
+        :type diarization: bool
+        :param initial_prompt: An optional text prompt to provide context or guide the model's style. Support varies by model.
+        :type initial_prompt: str
+        :param temperature: Sampling randomness in [0, 1] for supported models. Defaults vary by model.
+        :type temperature: float
+        :param smart_format: Enable provider-specific \"smart formatting\" features. Currently supported by Deepgram models.
+        :type smart_format: bool
+        :param speakers_expected: Hint for the number of expected speakers (1-10).
+        :type speakers_expected: int
+        :param custom_vocabulary: Words or phrases to bias recognition towards. Support varies by provider.
+        :type custom_vocabulary: List[str]
+        :param async_req: Whether to execute the request asynchronously.
+        :type async_req: bool, optional
+        :param _preload_content: if False, the ApiResponse.data will
+                                 be set to none and raw_data will store the
+                                 HTTP response body without reading/decoding.
+                                 Default is True.
+        :type _preload_content: bool, optional
+        :param _return_http_data_only: response data instead of ApiResponse
+                                       object with status code, headers, etc
+        :type _return_http_data_only: bool, optional
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :param _request_auth: set to override the auth_settings for a single
+                              request; this effectively ignores the authentication
+                              in the spec for a single request.
+        :type _request_auth: dict, optional
+        :type _content_type: string, optional: force content-type for the request
+        :raises ApiTypeError: if an unexpected keyword argument is supplied.
+        :return: Returns the result object.
+                 If the method is called asynchronously,
+                 returns the request thread.
+        :rtype: tuple(TranscriptionResponse, status_code(int), headers(HTTPHeaderDict))
+        """
+
+        # locals() captures every declared parameter above (plus `self` and
+        # `kwargs`); recognized extra kwargs are merged in below so the request
+        # builder can look everything up by name in _params.
+        _params = locals()
+
+        _all_params = [
+            'model',
+            'body',
+            'language',
+            'output_format',
+            'ruleset_id',
+            'punctuation',
+            'timestamp_granularity',
+            'diarization',
+            'initial_prompt',
+            'temperature',
+            'smart_format',
+            'speakers_expected',
+            'custom_vocabulary'
+        ]
+        _all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout',
+                '_request_auth',
+                '_content_type',
+                '_headers'
+            ]
+        )
+
+        # validate the arguments
+        for _key, _val in _params['kwargs'].items():
+            if _key not in _all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method transcribe" % _key
+                )
+            _params[_key] = _val
+        del _params['kwargs']
+
+        _collection_formats = {}
+
+        # process the path parameters
+        _path_params = {}
+
+        # process the query parameters
+        # Only non-None parameters are sent; enum-typed parameters are
+        # serialized via their `.value`, plain scalars are appended as-is.
+        _query_params = []
+        if _params.get('model') is not None:  # noqa: E501
+            _query_params.append(('model', _params['model'].value))
+
+        if _params.get('language') is not None:  # noqa: E501
+            _query_params.append(('language', _params['language'].value))
+
+        if _params.get('output_format') is not None:  # noqa: E501
+            _query_params.append(('output_format', _params['output_format'].value))
+
+        if _params.get('ruleset_id') is not None:  # noqa: E501
+            _query_params.append(('ruleset_id', _params['ruleset_id']))
+
+        if _params.get('punctuation') is not None:  # noqa: E501
+            _query_params.append(('punctuation', _params['punctuation']))
+
+        if _params.get('timestamp_granularity') is not None:  # noqa: E501
+            _query_params.append(('timestamp_granularity', _params['timestamp_granularity']))
+
+        if _params.get('diarization') is not None:  # noqa: E501
+            _query_params.append(('diarization', _params['diarization']))
+
+        if _params.get('initial_prompt') is not None:  # noqa: E501
+            _query_params.append(('initial_prompt', _params['initial_prompt']))
+
+        if _params.get('temperature') is not None:  # noqa: E501
+            _query_params.append(('temperature', _params['temperature']))
+
+        if _params.get('smart_format') is not None:  # noqa: E501
+            _query_params.append(('smart_format', _params['smart_format']))
+
+        if _params.get('speakers_expected') is not None:  # noqa: E501
+            _query_params.append(('speakers_expected', _params['speakers_expected']))
+
+        if _params.get('custom_vocabulary') is not None:  # noqa: E501
+            _query_params.append(('custom_vocabulary', _params['custom_vocabulary']))
+            # 'multi' repeats the query key once per list element.
+            _collection_formats['custom_vocabulary'] = 'multi'
+
+        # process the header parameters
+        _header_params = dict(_params.get('_headers', {}))
+        # process the form parameters
+        _form_params = []
+        _files = {}
+        # process the body parameter
+        _body_params = None
+        if _params['body'] is not None:
+            _body_params = _params['body']
+            # convert to byte array if the input is a file name (str)
+            if isinstance(_body_params, str):
+                with io.open(_body_params, "rb") as _fp:
+                    _body_params_from_file = _fp.read()
+                _body_params = _body_params_from_file
+
+        # set the HTTP header `Accept`
+        _header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'text/plain'])  # noqa: E501
+
+        # set the HTTP header `Content-Type`
+        _content_types_list = _params.get('_content_type',
+            self.api_client.select_header_content_type(
+                ['audio/*']))
+        if _content_types_list:
+            _header_params['Content-Type'] = _content_types_list
+
+        # authentication setting
+        _auth_settings = ['bearerAuth']  # noqa: E501
+
+        # Map HTTP status codes to the model used to deserialize each response body.
+        _response_types_map = {
+            '200': "TranscriptionResponse",
+            '400': "ErrorResponse",
+            '401': "ErrorResponse",
+            '402': "ErrorResponse",
+            '404': "ErrorResponse",
+            '429': "ErrorResponse",
+            '500': "ErrorResponse",
+            '503': "ErrorResponse",
+            '504': "ErrorResponse",
+        }
+
+        return self.api_client.call_api(
+            '/transcribe', 'POST',
+            _path_params,
+            _query_params,
+            _header_params,
+            body=_body_params,
+            post_params=_form_params,
+            files=_files,
+            response_types_map=_response_types_map,
+            auth_settings=_auth_settings,
+            async_req=_params.get('async_req'),
+            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=_params.get('_preload_content', True),
+            _request_timeout=_params.get('_request_timeout'),
+            collection_formats=_collection_formats,
+            _request_auth=_params.get('_request_auth'))
+
+ @validate_arguments
+ def transcribe_remote(self, remote_transcription_configuration : Annotated[RemoteTranscriptionConfiguration, Field(..., description="JSON object containing the URL of the audio file and the desired transcription options.")], **kwargs) -> TranscriptionResponse: # noqa: E501
+ """Transcribe an audio file located at a remote URL. # noqa: E501
+
+ This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online. # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+
+ >>> thread = api.transcribe_remote(remote_transcription_configuration, async_req=True)
+ >>> result = thread.get()
+
+ :param remote_transcription_configuration: JSON object containing the URL of the audio file and the desired transcription options. (required)
+ :type remote_transcription_configuration: RemoteTranscriptionConfiguration
+ :param async_req: Whether to execute the request asynchronously.
+ :type async_req: bool, optional
+ :param _request_timeout: timeout setting for this request.
+ If one number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: Returns the result object.
+ If the method is called asynchronously,
+ returns the request thread.
+ :rtype: TranscriptionResponse
+ """
+ kwargs['_return_http_data_only'] = True
+ if '_preload_content' in kwargs:
+ message = "Error! Please call the transcribe_remote_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501
+ raise ValueError(message)
+ return self.transcribe_remote_with_http_info(remote_transcription_configuration, **kwargs) # noqa: E501
+
    @validate_arguments
    def transcribe_remote_with_http_info(self, remote_transcription_configuration : Annotated[RemoteTranscriptionConfiguration, Field(..., description="JSON object containing the URL of the audio file and the desired transcription options.")], **kwargs) -> ApiResponse:  # noqa: E501
        """Transcribe an audio file located at a remote URL. # noqa: E501

        This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.transcribe_remote_with_http_info(remote_transcription_configuration, async_req=True)
        >>> result = thread.get()

        :param remote_transcription_configuration: JSON object containing the URL of the audio file and the desired transcription options. (required)
        :type remote_transcription_configuration: RemoteTranscriptionConfiguration
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(TranscriptionResponse, status_code(int), headers(HTTPHeaderDict))
        """

        # locals() snapshots the declared parameter plus the raw `kwargs`
        # dict; validated kwargs are folded into this mapping below.
        _params = locals()

        # Whitelist of every argument this endpoint accepts.
        _all_params = [
            'remote_transcription_configuration'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    # message names the public `transcribe_remote` wrapper —
                    # presumably so callers see the API surface they invoked
                    "Got an unexpected keyword argument '%s'"
                    " to method transcribe_remote" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters (none for this endpoint)
        _path_params = {}

        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter: the configuration object is sent
        # as the JSON request body
        _body_params = None
        if _params['remote_transcription_configuration'] is not None:
            _body_params = _params['remote_transcription_configuration']

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/plain'])  # noqa: E501

        # set the HTTP header `Content-Type`; an explicit _content_type kwarg
        # overrides the negotiated value
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list

        # authentication setting
        _auth_settings = ['bearerAuth']  # noqa: E501

        # HTTP status code -> model name used to deserialize the body
        _response_types_map = {
            '200': "TranscriptionResponse",
            '400': "ErrorResponse",
            '401': "ErrorResponse",
            '402': "ErrorResponse",
            '404': "ErrorResponse",
            '429': "ErrorResponse",
            '500': "ErrorResponse",
            '503': "ErrorResponse",
            '504': "ErrorResponse",
        }

        return self.api_client.call_api(
            '/transcribe-remote', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
diff --git a/speechall/api_client.py b/speechall/api_client.py
new file mode 100644
index 0000000..2a86189
--- /dev/null
+++ b/speechall/api_client.py
@@ -0,0 +1,766 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import atexit
+import datetime
+from dateutil.parser import parse
+import json
+import mimetypes
+from multiprocessing.pool import ThreadPool
+import os
+import re
+import tempfile
+
+from urllib.parse import quote
+from pydantic import SecretStr
+
+from speechall.configuration import Configuration
+from speechall.api_response import ApiResponse
+import speechall.models
+from speechall import rest
+from speechall.exceptions import ApiValueError, ApiException
+
+
+class ApiClient:
+ """Generic API client for OpenAPI client library builds.
+
+ OpenAPI generic API client. This client handles the client-
+ server communication, and is invariant across implementations. Specifics of
+ the methods and models for each application are generated from the OpenAPI
+ templates.
+
+ :param configuration: .Configuration object for this client
+ :param header_name: a header to pass when making calls to the API.
+ :param header_value: a header value to pass when making calls to
+ the API.
+ :param cookie: a cookie to include in the header when making calls
+ to the API
+ :param pool_threads: The number of threads to use for async requests
+ to the API. More threads means more concurrent API requests.
+ """
+
+ PRIMITIVE_TYPES = (float, bool, bytes, str, int)
+ NATIVE_TYPES_MAPPING = {
+ 'int': int,
+ 'long': int, # TODO remove as only py3 is supported?
+ 'float': float,
+ 'str': str,
+ 'bool': bool,
+ 'date': datetime.date,
+ 'datetime': datetime.datetime,
+ 'object': object,
+ }
+ _pool = None
+
    def __init__(self, configuration=None, header_name=None, header_value=None,
                 cookie=None, pool_threads=1) -> None:
        """Initialize the client, falling back to the process-wide default
        Configuration when none is supplied."""
        # use default configuration if none is provided
        if configuration is None:
            configuration = Configuration.get_default()
        self.configuration = configuration
        self.pool_threads = pool_threads

        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent (goes through the property setter below).
        self.user_agent = 'OpenAPI-Generator/0.1.0/python'
        self.client_side_validation = configuration.client_side_validation
+
    def __enter__(self):
        # context-manager support: `with ApiClient() as client: ...`
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # release the thread pool (if one was created) on context exit
        self.close()
+
    def close(self):
        """Tear down the thread pool (if one was created) and deregister the
        atexit hook so the interpreter can shut down cleanly."""
        if self._pool:
            self._pool.close()
            self._pool.join()
            self._pool = None
            if hasattr(atexit, 'unregister'):
                atexit.unregister(self.close)
+
    @property
    def pool(self):
        """Create thread pool on first request
        avoids instantiating unused threadpool for blocking clients.
        """
        if self._pool is None:
            # register close() so the pool is torn down at interpreter exit
            atexit.register(self.close)
            self._pool = ThreadPool(self.pool_threads)
        return self._pool
+
    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']

    @user_agent.setter
    def user_agent(self, value):
        # stored in default_headers so it rides along on every request
        self.default_headers['User-Agent'] = value
+
    def set_default_header(self, header_name, header_value):
        """Add or overwrite a header that is sent with every request."""
        self.default_headers[header_name] = header_value
+
+
+ _default = None
+
    @classmethod
    def get_default(cls):
        """Return the default ApiClient singleton.

        Lazily creates one via the default constructor on first use, then
        returns the same instance (not a copy) on every subsequent call.

        :return: The ApiClient object.
        """
        if cls._default is None:
            cls._default = ApiClient()
        return cls._default
+
    @classmethod
    def set_default(cls, default):
        """Set default instance of ApiClient.

        Subsequent get_default() calls return this instance.

        :param default: object of ApiClient.
        """
        cls._default = default
+
    def __call_api(
            self, resource_path, method, path_params=None,
            query_params=None, header_params=None, body=None, post_params=None,
            files=None, response_types_map=None, auth_settings=None,
            _return_http_data_only=None, collection_formats=None,
            _preload_content=True, _request_timeout=None, _host=None,
            _request_auth=None):
        """Core worker behind call_api(): builds the request (headers, path
        substitution, auth, body), performs it via self.request(), and
        deserializes the response per response_types_map."""

        config = self.configuration

        # header parameters: per-call headers plus client defaults and cookie
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(self.parameters_to_tuples(header_params,
                                                           collection_formats))

        # path parameters: substitute `{name}` placeholders in resource_path
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param)
                )

        # post parameters (form fields and multipart file uploads)
        if post_params or files:
            post_params = post_params if post_params else []
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)
            post_params.extend(self.files_parameters(files))

        # auth setting
        self.update_params_for_auth(
            header_params, query_params, auth_settings,
            resource_path, method, body,
            request_auth=_request_auth)

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # request url
        if _host is None:
            url = self.configuration.host + resource_path
        else:
            # use server/host defined in path or operation instead
            url = _host + resource_path

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            url_query = self.parameters_to_url_query(query_params,
                                                     collection_formats)
            url += "?" + url_query

        try:
            # perform request and return response
            response_data = self.request(
                method, url,
                query_params=query_params,
                headers=header_params,
                post_params=post_params, body=body,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout)
        except ApiException as e:
            # decode the error body so callers see readable text, then re-raise
            if e.body:
                e.body = e.body.decode('utf-8')
            raise e

        self.last_response = response_data

        return_data = None  # assuming deserialization is not needed
        # data needs deserialization or returns HTTP data (deserialized) only
        if _preload_content or _return_http_data_only:
            response_type = response_types_map.get(str(response_data.status), None)
            if not response_type and isinstance(response_data.status, int) and 100 <= response_data.status <= 599:
                # if not found, look for '1XX', '2XX', etc.
                response_type = response_types_map.get(str(response_data.status)[0] + "XX", None)

            if response_type == "bytearray":
                response_data.data = response_data.data
            else:
                match = None
                content_type = response_data.getheader('content-type')
                if content_type is not None:
                    # honor an explicit charset in the header; default to UTF-8
                    match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type)
                encoding = match.group(1) if match else "utf-8"
                response_data.data = response_data.data.decode(encoding)

            # deserialize response data
            if response_type == "bytearray":
                return_data = response_data.data
            elif response_type:
                return_data = self.deserialize(response_data, response_type)
            else:
                return_data = None

        if _return_http_data_only:
            return return_data
        else:
            return ApiResponse(status_code = response_data.status,
                        data = return_data,
                        headers = response_data.getheaders(),
                        raw_data = response_data.data)
+
    def sanitize_for_serialization(self, obj):
        """Builds a JSON POST object.

        If obj is None, return None.
        If obj is SecretStr, return obj.get_secret_value()
        If obj is str, int, long, float, bool, return directly.
        If obj is datetime.datetime, datetime.date
            convert to string in iso8601 format.
        If obj is list or tuple, sanitize each element.
        If obj is dict, sanitize each value of the dict.
        If obj is OpenAPI model, return the sanitized properties dict.

        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        if obj is None:
            return None
        elif isinstance(obj, SecretStr):
            # unwrap pydantic secrets so the real value is transmitted
            return obj.get_secret_value()
        elif isinstance(obj, self.PRIMITIVE_TYPES):
            return obj
        elif isinstance(obj, list):
            return [self.sanitize_for_serialization(sub_obj)
                    for sub_obj in obj]
        elif isinstance(obj, tuple):
            return tuple(self.sanitize_for_serialization(sub_obj)
                         for sub_obj in obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()

        if isinstance(obj, dict):
            obj_dict = obj
        else:
            # Convert model obj to dict except
            # attributes `openapi_types`, `attribute_map`
            # and attributes which value is not None.
            # Convert attribute name to json key in
            # model definition for request.
            if hasattr(obj, 'to_dict') and callable(getattr(obj, 'to_dict')):
                obj_dict = obj.to_dict()
            else:
                # fall back to the raw instance dict for plain objects
                obj_dict = obj.__dict__

        return {key: self.sanitize_for_serialization(val)
                for key, val in obj_dict.items()}
+
    def deserialize(self, response, response_type):
        """Deserializes response into an object.

        :param response: RESTResponse object to be deserialized.
        :param response_type: class literal for
            deserialized object, or string of class name.

        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if response_type == "file":
            return self.__deserialize_file(response)

        # fetch data from response object
        try:
            data = json.loads(response.data)
        except ValueError:
            # body is not valid JSON; deserialize from the raw text instead
            data = response.data

        return self.__deserialize(data, response_type)
+
    def __deserialize(self, data, klass):
        """Deserializes dict, list, str into an object.

        :param data: dict, list or str.
        :param klass: class literal, or string of class name; string forms
            support 'List[...]' and 'Dict[..., ...]' container notations.

        :return: object.
        """
        if data is None:
            return None

        if isinstance(klass, str):
            # container notations recurse on the element type
            if klass.startswith('List['):
                sub_kls = re.match(r'List\[(.*)]', klass).group(1)
                return [self.__deserialize(sub_data, sub_kls)
                        for sub_data in data]

            if klass.startswith('Dict['):
                sub_kls = re.match(r'Dict\[([^,]*), (.*)]', klass).group(2)
                return {k: self.__deserialize(v, sub_kls)
                        for k, v in data.items()}

            # convert str to class
            if klass in self.NATIVE_TYPES_MAPPING:
                klass = self.NATIVE_TYPES_MAPPING[klass]
            else:
                # any other name is looked up among the generated models
                klass = getattr(speechall.models, klass)

        if klass in self.PRIMITIVE_TYPES:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == datetime.date:
            return self.__deserialize_date(data)
        elif klass == datetime.datetime:
            return self.__deserialize_datetime(data)
        else:
            return self.__deserialize_model(data, klass)
+
    def call_api(self, resource_path, method,
                 path_params=None, query_params=None, header_params=None,
                 body=None, post_params=None, files=None,
                 response_types_map=None, auth_settings=None,
                 async_req=None, _return_http_data_only=None,
                 collection_formats=None, _preload_content=True,
                 _request_timeout=None, _host=None, _request_auth=None):
        """Makes the HTTP request (synchronous) and returns deserialized data.

        To make an async_req request, set the async_req parameter.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response: Response data type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
            then the method will return the response directly.
        """
        # Positional tuple must match __call_api's parameter order exactly.
        args = (
            resource_path,
            method,
            path_params,
            query_params,
            header_params,
            body,
            post_params,
            files,
            response_types_map,
            auth_settings,
            _return_http_data_only,
            collection_formats,
            _preload_content,
            _request_timeout,
            _host,
            _request_auth,
        )
        if not async_req:
            # synchronous path: perform the call on the current thread
            return self.__call_api(*args)

        # asynchronous path: schedule on the lazily-created thread pool;
        # the returned AsyncResult's .get() yields the response
        return self.pool.apply_async(self.__call_api, args)
+
+ def request(self, method, url, query_params=None, headers=None,
+ post_params=None, body=None, _preload_content=True,
+ _request_timeout=None):
+ """Makes the HTTP request using RESTClient."""
+ if method == "GET":
+ return self.rest_client.get_request(url,
+ query_params=query_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ headers=headers)
+ elif method == "HEAD":
+ return self.rest_client.head_request(url,
+ query_params=query_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ headers=headers)
+ elif method == "OPTIONS":
+ return self.rest_client.options_request(url,
+ query_params=query_params,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout)
+ elif method == "POST":
+ return self.rest_client.post_request(url,
+ query_params=query_params,
+ headers=headers,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ elif method == "PUT":
+ return self.rest_client.put_request(url,
+ query_params=query_params,
+ headers=headers,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ elif method == "PATCH":
+ return self.rest_client.patch_request(url,
+ query_params=query_params,
+ headers=headers,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ elif method == "DELETE":
+ return self.rest_client.delete_request(url,
+ query_params=query_params,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+ else:
+ raise ApiValueError(
+ "http method must be `GET`, `HEAD`, `OPTIONS`,"
+ " `POST`, `PATCH`, `PUT` or `DELETE`."
+ )
+
+ def parameters_to_tuples(self, params, collection_formats):
+ """Get parameters as list of tuples, formatting collections.
+
+ :param params: Parameters as dict or list of two-tuples
+ :param dict collection_formats: Parameter collection formats
+ :return: Parameters as list of tuples, collections formatted
+ """
+ new_params = []
+ if collection_formats is None:
+ collection_formats = {}
+ for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
+ if k in collection_formats:
+ collection_format = collection_formats[k]
+ if collection_format == 'multi':
+ new_params.extend((k, value) for value in v)
+ else:
+ if collection_format == 'ssv':
+ delimiter = ' '
+ elif collection_format == 'tsv':
+ delimiter = '\t'
+ elif collection_format == 'pipes':
+ delimiter = '|'
+ else: # csv is the default
+ delimiter = ','
+ new_params.append(
+ (k, delimiter.join(str(value) for value in v)))
+ else:
+ new_params.append((k, v))
+ return new_params
+
+ def parameters_to_url_query(self, params, collection_formats):
+ """Get parameters as list of tuples, formatting collections.
+
+ :param params: Parameters as dict or list of two-tuples
+ :param dict collection_formats: Parameter collection formats
+ :return: URL query string (e.g. a=Hello%20World&b=123)
+ """
+ new_params = []
+ if collection_formats is None:
+ collection_formats = {}
+ for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
+ if isinstance(v, bool):
+ v = str(v).lower()
+ if isinstance(v, (int, float)):
+ v = str(v)
+ if isinstance(v, dict):
+ v = json.dumps(v)
+
+ if k in collection_formats:
+ collection_format = collection_formats[k]
+ if collection_format == 'multi':
+ new_params.extend((k, quote(str(value))) for value in v)
+ else:
+ if collection_format == 'ssv':
+ delimiter = ' '
+ elif collection_format == 'tsv':
+ delimiter = '\t'
+ elif collection_format == 'pipes':
+ delimiter = '|'
+ else: # csv is the default
+ delimiter = ','
+ new_params.append(
+ (k, delimiter.join(quote(str(value)) for value in v)))
+ else:
+ new_params.append((k, quote(str(v))))
+
+ return "&".join(["=".join(map(str, item)) for item in new_params])
+
+ def files_parameters(self, files=None):
+ """Builds form parameters.
+
+ :param files: File parameters.
+ :return: Form parameters with files.
+ """
+ params = []
+
+ if files:
+ for k, v in files.items():
+ if not v:
+ continue
+ file_names = v if type(v) is list else [v]
+ for n in file_names:
+ with open(n, 'rb') as f:
+ filename = os.path.basename(f.name)
+ filedata = f.read()
+ mimetype = (mimetypes.guess_type(filename)[0] or
+ 'application/octet-stream')
+ params.append(
+ tuple([k, tuple([filename, filedata, mimetype])]))
+
+ return params
+
+ def select_header_accept(self, accepts):
+ """Returns `Accept` based on an array of accepts provided.
+
+ :param accepts: List of headers.
+ :return: Accept (e.g. application/json).
+ """
+ if not accepts:
+ return
+
+ for accept in accepts:
+ if re.search('json', accept, re.IGNORECASE):
+ return accept
+
+ return accepts[0]
+
+ def select_header_content_type(self, content_types):
+ """Returns `Content-Type` based on an array of content_types provided.
+
+ :param content_types: List of content-types.
+ :return: Content-Type (e.g. application/json).
+ """
+ if not content_types:
+ return None
+
+ for content_type in content_types:
+ if re.search('json', content_type, re.IGNORECASE):
+ return content_type
+
+ return content_types[0]
+
    def update_params_for_auth(self, headers, queries, auth_settings,
                               resource_path, method, body,
                               request_auth=None):
        """Updates header and query params based on authentication setting.

        :param headers: Header parameters dict to be updated.
        :param queries: Query parameters tuple list to be updated.
        :param auth_settings: Authentication setting identifiers list.
        :param resource_path: A string representation of the HTTP request resource path.
        :param method: A string representation of the HTTP request method.
        :param body: A object representing the body of the HTTP request.
            The object type is the return value of sanitize_for_serialization().
        :param request_auth: if set, the provided settings will
            override the token in the configuration.
        """
        if not auth_settings:
            return

        if request_auth:
            # per-request override: apply it and skip the configured auth
            self._apply_auth_params(headers, queries,
                                    resource_path, method, body,
                                    request_auth)
            return

        for auth in auth_settings:
            # only apply settings for which the configuration holds credentials
            auth_setting = self.configuration.auth_settings().get(auth)
            if auth_setting:
                self._apply_auth_params(headers, queries,
                                        resource_path, method, body,
                                        auth_setting)
+
    def _apply_auth_params(self, headers, queries,
                           resource_path, method, body,
                           auth_setting):
        """Updates the request parameters based on a single auth_setting

        :param headers: Header parameters dict to be updated.
        :param queries: Query parameters tuple list to be updated.
        :param resource_path: A string representation of the HTTP request resource path.
        :param method: A string representation of the HTTP request method.
        :param body: A object representing the body of the HTTP request.
            The object type is the return value of sanitize_for_serialization().
        :param auth_setting: auth settings for the endpoint
        """
        if auth_setting['in'] == 'cookie':
            headers['Cookie'] = auth_setting['value']
        elif auth_setting['in'] == 'header':
            # http-signature auth is computed elsewhere, so it is skipped here
            if auth_setting['type'] != 'http-signature':
                headers[auth_setting['key']] = auth_setting['value']
        elif auth_setting['in'] == 'query':
            queries.append((auth_setting['key'], auth_setting['value']))
        else:
            raise ApiValueError(
                'Authentication token must be in `query` or `header`'
            )
+
    def __deserialize_file(self, response):
        """Deserializes body to file

        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response: RESTResponse.
        :return: file path.
        """
        # reserve a unique path, then release it so it can be re-used below
        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
        os.close(fd)
        os.remove(path)

        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            # prefer the server-suggested filename when one is present
            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                                 content_disposition).group(1)
            path = os.path.join(os.path.dirname(path), filename)

        with open(path, "wb") as f:
            f.write(response.data)

        return path
+
    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.

        Degrades gracefully: values that cannot be encoded are stringified,
        and values the constructor rejects are returned unchanged.

        :param data: str.
        :param klass: class literal.

        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            # non-encodable input: fall back to its string form
            return str(data)
        except TypeError:
            # klass cannot be constructed from this value; return as-is
            return data
+
    def __deserialize_object(self, value):
        """Return an original value.

        Used for the untyped `object` schema: no conversion is performed.

        :return: object.
        """
        return value
+
    def __deserialize_date(self, string):
        """Deserializes string to date.

        :param string: str.
        :return: date.
        """
        try:
            return parse(string).date()
        except ImportError:
            # NOTE(review): parse() should not raise ImportError here —
            # presumably a legacy generated guard; returns the raw string
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string)
            )
+
    def __deserialize_datetime(self, string):
        """Deserializes string to datetime.

        The string should be in iso8601 datetime format.

        :param string: str.
        :return: datetime.
        """
        try:
            return parse(string)
        except ImportError:
            # NOTE(review): parse() should not raise ImportError here —
            # presumably a legacy generated guard; returns the raw string
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object"
                    .format(string)
                )
            )
+
    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.

        :param data: dict, list.
        :param klass: class literal; expected to expose a `from_dict`
            factory (as the generated models do).
        :return: model object.
        """

        return klass.from_dict(data)
diff --git a/speechall/api_response.py b/speechall/api_response.py
new file mode 100644
index 0000000..a0b62b9
--- /dev/null
+++ b/speechall/api_response.py
@@ -0,0 +1,25 @@
+"""API response object."""
+
+from __future__ import annotations
+from typing import Any, Dict, Optional
+from pydantic import Field, StrictInt, StrictStr
+
class ApiResponse:
    """
    API response object

    Container for the outcome of an API call: the HTTP status code, the
    response headers, the deserialized payload (``data``) and the raw,
    undecoded body (``raw_data``).

    NOTE(review): this class does not subclass pydantic.BaseModel, so the
    annotated Field(...) defaults below act only as class-level annotations
    — no validation occurs. Confirm whether BaseModel was intended.
    """

    # HTTP status code returned by the server
    status_code: Optional[StrictInt] = Field(None, description="HTTP status code")
    # Response headers as a name -> value mapping
    headers: Optional[Dict[StrictStr, StrictStr]] = Field(None, description="HTTP headers")
    # Payload deserialized into the endpoint's response model
    data: Optional[Any] = Field(None, description="Deserialized data given the data type")
    # Undecoded HTTP response body
    raw_data: Optional[Any] = Field(None, description="Raw data (HTTP response body)")

    def __init__(self,
                 status_code=None,
                 headers=None,
                 data=None,
                 raw_data=None) -> None:
        self.status_code = status_code
        self.headers = headers
        self.data = data
        self.raw_data = raw_data
diff --git a/speechall/configuration.py b/speechall/configuration.py
new file mode 100644
index 0000000..958c5e2
--- /dev/null
+++ b/speechall/configuration.py
@@ -0,0 +1,443 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import copy
+import logging
+import multiprocessing
+import sys
+import urllib3
+
+import http.client as httplib
+
# JSON-Schema keywords that express value constraints (numeric bounds,
# string length, pattern, array size).
# NOTE(review): this set is not referenced anywhere in this file's visible
# code; presumably the generated models consult it when
# `client_side_validation` is toggled -- confirm against the model templates.
JSON_SCHEMA_VALIDATION_KEYWORDS = {
    'multipleOf', 'maximum', 'exclusiveMaximum',
    'minimum', 'exclusiveMinimum', 'maxLength',
    'minLength', 'pattern', 'maxItems', 'minItems'
}
+
class Configuration:
    """This class contains various settings of the API client.

    :param host: Base url.
    :param api_key: Dict to store API key(s).
      Each entry in the dict specifies an API key.
      The dict key is the name of the security scheme in the OAS specification.
      The dict value is the API key secret.
    :param api_key_prefix: Dict to store API prefix (e.g. Bearer).
      The dict key is the name of the security scheme in the OAS specification.
      The dict value is an API key prefix when generating the auth data.
    :param username: Username for HTTP basic authentication.
    :param password: Password for HTTP basic authentication.
    :param access_token: Access token.
    :param server_index: Index to servers configuration.
    :param server_variables: Mapping with string values to replace variables in
      templated server configuration. The validation of enums is performed for
      variables with defined enum values before.
    :param server_operation_index: Mapping from operation ID to an index to server
      configuration.
    :param server_operation_variables: Mapping from operation ID to a mapping with
      string values to replace variables in templated server configuration.
      The validation of enums is performed for variables with defined enum
      values before.
    :param ssl_ca_cert: str - the path to a file of concatenated CA certificates
      in PEM format.
    """

    # Process-wide default instance, managed by set_default()/get_default().
    _default = None

    def __init__(self, host=None,
                 api_key=None, api_key_prefix=None,
                 username=None, password=None,
                 access_token=None,
                 server_index=None, server_variables=None,
                 server_operation_index=None, server_operation_variables=None,
                 ssl_ca_cert=None,
                 ) -> None:
        """Constructor"""
        # Default base URL; overridden when an explicit host is supplied.
        self._base_path = "https://api.speechall.com/v1" if host is None else host
        # Default server index.  Left as None when an explicit host was given,
        # so the host takes precedence over the server-settings table.
        self.server_index = 0 if server_index is None and host is None else server_index
        self.server_operation_index = server_operation_index or {}
        # Default server variables.
        self.server_variables = server_variables or {}
        self.server_operation_variables = server_operation_variables or {}
        # Temp file folder for downloading files.
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        if api_key:
            self.api_key = api_key
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        if api_key_prefix:
            self.api_key_prefix = api_key_prefix
        # function hook to refresh API key if expired
        self.refresh_api_key_hook = None
        # Username for HTTP basic authentication
        self.username = username
        # Password for HTTP basic authentication
        self.password = password
        # Access token
        self.access_token = access_token

        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("speechall")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format.  Assigning through the property also builds the
        # logging.Formatter (see the logger_format setter).
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location.  Assigning through the property wires up the
        # file handler when a path is given (see the logger_file setter).
        self.logger_file = None
        # Debug switch.  Assigning through the property sets logger levels
        # (see the debug setter).
        self.debug = False

        # SSL/TLS verification.
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = ssl_ca_cert
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None
        # SSL/TLS Server Name Indication (SNI).
        # Set this to the SNI value expected by the server.
        self.tls_server_name = None

        # urllib3 connection pool's maximum number of connections saved
        # per pool. urllib3 uses 1 connection as default value, but this is
        # not the best value when you are making a lot of possibly parallel
        # requests to the same host, which is often the case here.
        # cpu_count * 5 is used as default value to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5

        # Proxy URL
        self.proxy = None
        # Proxy headers
        self.proxy_headers = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''
        # Adding retries to override urllib3 default value 3
        self.retries = None
        # Enable client side validation
        self.client_side_validation = True

        # Options to pass down to the underlying urllib3 socket
        self.socket_options = None

        # datetime format
        self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z"

        # date format
        self.date_format = "%Y-%m-%d"

    def __deepcopy__(self, memo):
        """Deep-copy the configuration while sharing logger objects.

        Loggers and the file handler are process-level resources, so they
        are shallow-copied; every other attribute is copied deeply.
        """
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ('logger', 'logger_file_handler'):
                setattr(result, k, copy.deepcopy(v, memo))
        # shallow copy of loggers
        result.logger = copy.copy(self.logger)
        # use setters to configure loggers
        result.logger_file = self.logger_file
        result.debug = self.debug
        return result

    def __setattr__(self, name, value):
        # NOTE(review): pure pass-through to object.__setattr__; looks
        # redundant but is kept for compatibility with the generated API.
        object.__setattr__(self, name, value)

    @classmethod
    def set_default(cls, default):
        """Set default instance of configuration.

        It stores default configuration, which can be
        returned by get_default_copy method.

        :param default: object of Configuration
        """
        cls._default = default

    @classmethod
    def get_default_copy(cls):
        """Deprecated. Please use `get_default` instead.

        :return: The configuration object.
        """
        return cls.get_default()

    @classmethod
    def get_default(cls):
        """Return the default configuration.

        Lazily creates a default-constructed Configuration on first use and
        returns that same instance afterwards (unless replaced via
        set_default).

        :return: The configuration object.
        """
        if cls._default is None:
            cls._default = Configuration()
        return cls._default

    @property
    def logger_file(self):
        """The logger file path (None when file logging is disabled).

        :return: The logger_file path.
        :rtype: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """Set the logger file.

        When a path is given, a logging.FileHandler using the current
        formatter is created and attached to every configured logger.
        NOTE(review): a previously attached file handler is not detached
        first and stream handlers are left untouched, so repeated
        assignment accumulates handlers -- confirm this matches the
        intended behavior.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # A file path was supplied: build the handler and attach it to
            # each logger managed by this configuration.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in self.logger.items():
                logger.addHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :return: The debug status, True or False.
        :rtype: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in self.logger.items():
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in self.logger.items():
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :return: The format string.
        :rtype: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier, alias=None):
        """Gets API key (with prefix if set).

        Returns None when no key is configured under either name.

        :param identifier: The identifier of apiKey.
        :param alias: The alternative identifier of apiKey.
        :return: The token for api key authentication.
        """
        if self.refresh_api_key_hook is not None:
            self.refresh_api_key_hook(self)
        key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        Missing username/password are treated as empty strings.

        :return: The token for basic HTTP authentication.
        """
        username = ""
        if self.username is not None:
            username = self.username
        password = ""
        if self.password is not None:
            password = self.password
        return urllib3.util.make_headers(
            basic_auth=username + ':' + password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        Only includes the bearer scheme, and only when an access token is
        configured.

        :return: The Auth Settings information dict.
        """
        auth = {}
        if self.access_token is not None:
            auth['bearerAuth'] = {
                'type': 'bearer',
                'in': 'header',
                'format': 'API Key',
                'key': 'Authorization',
                'value': 'Bearer ' + self.access_token
            }
        return auth

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 0.1.0\n"\
               "SDK Package Version: 0.1.0".\
               format(env=sys.platform, pyversion=sys.version)

    def get_host_settings(self):
        """Gets an array of host settings

        :return: An array of host settings
        """
        return [
            {
                'url': "https://api.speechall.com/v1",
                'description': "The version 1 endpoint of the Speechall API.",
            }
        ]

    def get_host_from_settings(self, index, variables=None, servers=None):
        """Gets host URL based on the index and variables

        :param index: array index of the host settings
        :param variables: hash of variable and the corresponding value
        :param servers: an array of host settings or None
        :return: URL based on host settings
        :raises ValueError: if the index is out of range or a variable value
            fails its enum validation
        """
        if index is None:
            # No server selected: fall back to the configured base path.
            return self._base_path

        variables = {} if variables is None else variables
        servers = self.get_host_settings() if servers is None else servers

        try:
            server = servers[index]
        except IndexError:
            raise ValueError(
                "Invalid index {0} when selecting the host settings. "
                "Must be less than {1}".format(index, len(servers)))

        url = server['url']

        # go through variables and replace placeholders
        for variable_name, variable in server.get('variables', {}).items():
            used_value = variables.get(
                variable_name, variable['default_value'])

            if 'enum_values' in variable \
                    and used_value not in variable['enum_values']:
                # BUGFIX: report `used_value` rather than indexing
                # `variables[variable_name]`, which raised KeyError (not the
                # intended ValueError) when the invalid value came from the
                # variable's default rather than the caller-supplied mapping.
                raise ValueError(
                    "The variable `{0}` in the host URL has invalid value "
                    "{1}. Must be {2}.".format(
                        variable_name, used_value,
                        variable['enum_values']))

            url = url.replace("{" + variable_name + "}", used_value)

        return url

    @property
    def host(self):
        """Return generated host."""
        return self.get_host_from_settings(self.server_index, variables=self.server_variables)

    @host.setter
    def host(self, value):
        """Fix base path."""
        self._base_path = value
        self.server_index = None
diff --git a/speechall/exceptions.py b/speechall/exceptions.py
new file mode 100644
index 0000000..1b7f7c1
--- /dev/null
+++ b/speechall/exceptions.py
@@ -0,0 +1,166 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
class OpenApiException(Exception):
    """Base class for every exception raised by this generated client."""


class ApiTypeError(OpenApiException, TypeError):
    """Raised when a value has an unexpected type during (de)serialization."""

    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None) -> None:
        """Build the error, appending the data path to the message if given.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the offending
                item; None if unset
            valid_classes (tuple): the primitive classes the item should be
                an instance of; None if unset
            key_type (bool): True when the offending item is a dict key,
                False for a dict value or a list item; None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)


class ApiValueError(OpenApiException, ValueError):
    """Raised when a value is invalid for the expected schema."""

    def __init__(self, msg, path_to_item=None) -> None:
        """Build the error, appending the data path to the message if given.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict; None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)


class ApiAttributeError(OpenApiException, AttributeError):
    """Raised when an attribute reference or assignment fails."""

    def __init__(self, msg, path_to_item=None) -> None:
        """Build the error, appending the data path to the message if given.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)


class ApiKeyError(OpenApiException, KeyError):
    """Raised when an expected key is missing from received data."""

    def __init__(self, msg, path_to_item=None) -> None:
        """Build the error, appending the data path to the message if given.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)


class ApiException(OpenApiException):
    """Raised when the API returns an HTTP error response.

    Either wraps an HTTP response object (taking status, reason, body and
    headers from it) or carries an explicitly supplied status and reason.
    """

    def __init__(self, status=None, reason=None, http_resp=None) -> None:
        if http_resp:
            # Pull everything from the HTTP response object.
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Assemble the multi-line error description."""
        parts = ["({0})\nReason: {1}\n".format(self.status, self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)


class BadRequestException(ApiException):
    """ApiException variant for bad-request errors."""

    def __init__(self, status=None, reason=None, http_resp=None) -> None:
        super().__init__(status, reason, http_resp)


class NotFoundException(ApiException):
    """ApiException variant for not-found errors."""

    def __init__(self, status=None, reason=None, http_resp=None) -> None:
        super().__init__(status, reason, http_resp)


class UnauthorizedException(ApiException):
    """ApiException variant for unauthorized errors."""

    def __init__(self, status=None, reason=None, http_resp=None) -> None:
        super().__init__(status, reason, http_resp)


class ForbiddenException(ApiException):
    """ApiException variant for forbidden errors."""

    def __init__(self, status=None, reason=None, http_resp=None) -> None:
        super().__init__(status, reason, http_resp)


class ServiceException(ApiException):
    """ApiException variant for server-side errors."""

    def __init__(self, status=None, reason=None, http_resp=None) -> None:
        super().__init__(status, reason, http_resp)


def render_path(path_to_item):
    """Returns a string representation of a path"""
    return "".join(
        "[{0}]".format(pth) if isinstance(pth, int) else "['{0}']".format(pth)
        for pth in path_to_item
    )
diff --git a/speechall/models/__init__.py b/speechall/models/__init__.py
new file mode 100644
index 0000000..b282388
--- /dev/null
+++ b/speechall/models/__init__.py
@@ -0,0 +1,38 @@
+# coding: utf-8
+
+# flake8: noqa
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+# import models into model package
+from speechall.models.base_transcription_configuration import BaseTranscriptionConfiguration
+from speechall.models.create_replacement_ruleset201_response import CreateReplacementRuleset201Response
+from speechall.models.create_replacement_ruleset_request import CreateReplacementRulesetRequest
+from speechall.models.error_response import ErrorResponse
+from speechall.models.exact_rule import ExactRule
+from speechall.models.open_ai_create_translation_request_model import OpenAICreateTranslationRequestModel
+from speechall.models.openai_compatible_create_transcription200_response import OpenaiCompatibleCreateTranscription200Response
+from speechall.models.openai_compatible_create_translation200_response import OpenaiCompatibleCreateTranslation200Response
+from speechall.models.regex_group_rule import RegexGroupRule
+from speechall.models.regex_rule import RegexRule
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration
+from speechall.models.replacement_rule import ReplacementRule
+from speechall.models.speech_to_text_model import SpeechToTextModel
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.transcription_detailed import TranscriptionDetailed
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcription_only_text import TranscriptionOnlyText
+from speechall.models.transcription_provider import TranscriptionProvider
+from speechall.models.transcription_response import TranscriptionResponse
+from speechall.models.transcription_segment import TranscriptionSegment
+from speechall.models.transcription_word import TranscriptionWord
diff --git a/speechall/models/base_transcription_configuration.py b/speechall/models/base_transcription_configuration.py
new file mode 100644
index 0000000..82ac264
--- /dev/null
+++ b/speechall/models/base_transcription_configuration.py
@@ -0,0 +1,106 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import List, Optional, Union
+from pydantic import BaseModel, Field, StrictBool, StrictStr, confloat, conint, conlist, validator
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+
class BaseTranscriptionConfiguration(BaseModel):
    """
    Common configuration options for transcription, applicable to both direct uploads and remote URLs. # noqa: E501
    """
    model: TranscriptionModelIdentifier = Field(...)
    language: Optional[TranscriptLanguageCode] = None
    output_format: Optional[TranscriptOutputFormat] = None
    ruleset_id: Optional[StrictStr] = Field(default=None, description="The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text.")
    punctuation: Optional[StrictBool] = Field(default=True, description="Whether to add punctuation. Support varies by model (e.g., Deepgram, AssemblyAI). Defaults to `true`.")
    timestamp_granularity: Optional[StrictStr] = Field(default='segment', description="Level of timestamp detail (`word` or `segment`). Defaults to `segment`.")
    diarization: Optional[StrictBool] = Field(default=False, description="Enable speaker diarization. Defaults to `false`.")
    initial_prompt: Optional[StrictStr] = Field(default=None, description="Optional text prompt to guide the transcription model. Support varies (e.g., OpenAI).")
    temperature: Optional[Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)]] = Field(default=None, description="Controls output randomness for supported models (e.g., OpenAI). Value between 0 and 1.")
    smart_format: Optional[StrictBool] = Field(default=None, description="Enable provider-specific smart formatting (e.g., Deepgram). Defaults vary.")
    speakers_expected: Optional[conint(strict=True, le=10, ge=1)] = Field(default=None, description="Hint for the number of expected speakers for diarization (e.g., RevAI, Deepgram).")
    custom_vocabulary: Optional[conlist(StrictStr)] = Field(default=None, description="List of custom words/phrases to improve recognition (e.g., Deepgram, AssemblyAI).")
    # Serialized field names, in wire order; used by the to_dict/from_dict pair.
    __properties = ["model", "language", "output_format", "ruleset_id", "punctuation", "timestamp_granularity", "diarization", "initial_prompt", "temperature", "smart_format", "speakers_expected", "custom_vocabulary"]

    @validator('timestamp_granularity')
    def timestamp_granularity_validate_enum(cls, value):
        """Validates the enum"""
        # None is allowed; the server applies its default ('segment').
        if value is None:
            return value

        if value not in ('word', 'segment',):
            raise ValueError("must be one of enum values ('word', 'segment')")
        return value

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> BaseTranscriptionConfiguration:
        """Create an instance of BaseTranscriptionConfiguration from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias

        Keys with a None value are dropped (exclude_none=True).
        """
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> BaseTranscriptionConfiguration:
        """Create an instance of BaseTranscriptionConfiguration from a dict

        Returns None when obj is None; non-dict input is handed straight to
        pydantic's parse_obj for coercion/validation.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return BaseTranscriptionConfiguration.parse_obj(obj)

        # Re-apply the documented defaults when the corresponding keys are
        # absent or explicitly null in the incoming payload.
        _obj = BaseTranscriptionConfiguration.parse_obj({
            "model": obj.get("model"),
            "language": obj.get("language"),
            "output_format": obj.get("output_format"),
            "ruleset_id": obj.get("ruleset_id"),
            "punctuation": obj.get("punctuation") if obj.get("punctuation") is not None else True,
            "timestamp_granularity": obj.get("timestamp_granularity") if obj.get("timestamp_granularity") is not None else 'segment',
            "diarization": obj.get("diarization") if obj.get("diarization") is not None else False,
            "initial_prompt": obj.get("initial_prompt"),
            "temperature": obj.get("temperature"),
            "smart_format": obj.get("smart_format"),
            "speakers_expected": obj.get("speakers_expected"),
            "custom_vocabulary": obj.get("custom_vocabulary")
        })
        return _obj
+
+
diff --git a/speechall/models/create_replacement_ruleset201_response.py b/speechall/models/create_replacement_ruleset201_response.py
new file mode 100644
index 0000000..b3204eb
--- /dev/null
+++ b/speechall/models/create_replacement_ruleset201_response.py
@@ -0,0 +1,71 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+
+from pydantic import BaseModel, Field, StrictStr
+
class CreateReplacementRuleset201Response(BaseModel):
    """
    CreateReplacementRuleset201Response
    """
    id: StrictStr = Field(default=..., description="The unique identifier (UUID) generated for this ruleset. Use this ID in the `ruleset_id` parameter of transcription requests.")
    # Serialized field names, in wire order; used by the to_dict/from_dict pair.
    __properties = ["id"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> CreateReplacementRuleset201Response:
        """Create an instance of CreateReplacementRuleset201Response from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias

        Keys with a None value are dropped (exclude_none=True).
        """
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> CreateReplacementRuleset201Response:
        """Create an instance of CreateReplacementRuleset201Response from a dict

        Returns None when obj is None; non-dict input is handed straight to
        pydantic's parse_obj for coercion/validation.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return CreateReplacementRuleset201Response.parse_obj(obj)

        _obj = CreateReplacementRuleset201Response.parse_obj({
            "id": obj.get("id")
        })
        return _obj
+
+
diff --git a/speechall/models/create_replacement_ruleset_request.py b/speechall/models/create_replacement_ruleset_request.py
new file mode 100644
index 0000000..789b1cf
--- /dev/null
+++ b/speechall/models/create_replacement_ruleset_request.py
@@ -0,0 +1,81 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import List
+from pydantic import BaseModel, Field, StrictStr, conlist
+from speechall.models.replacement_rule import ReplacementRule
+
class CreateReplacementRulesetRequest(BaseModel):
    """
    CreateReplacementRulesetRequest
    """
    name: StrictStr = Field(default=..., description="A user-defined name for this ruleset for easier identification.")
    rules: conlist(ReplacementRule, min_items=1) = Field(default=..., description="An ordered array of replacement rules. Rules are applied in the order they appear in this list. See the `ReplacementRule` schema for different rule types (exact, regex, regex_group).")
    # Serialized field names, in wire order; used by the to_dict/from_dict pair.
    __properties = ["name", "rules"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> CreateReplacementRulesetRequest:
        """Create an instance of CreateReplacementRulesetRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias

        Keys with a None value are dropped (exclude_none=True).
        """
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of each item in rules (list)
        _items = []
        if self.rules:
            for _item in self.rules:
                if _item:
                    _items.append(_item.to_dict())
            _dict['rules'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> CreateReplacementRulesetRequest:
        """Create an instance of CreateReplacementRulesetRequest from a dict

        Returns None when obj is None; non-dict input is handed straight to
        pydantic's parse_obj for coercion/validation.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return CreateReplacementRulesetRequest.parse_obj(obj)

        # Each rule entry is itself deserialized through ReplacementRule.
        _obj = CreateReplacementRulesetRequest.parse_obj({
            "name": obj.get("name"),
            "rules": [ReplacementRule.from_dict(_item) for _item in obj.get("rules")] if obj.get("rules") is not None else None
        })
        return _obj
+
+
diff --git a/speechall/models/error_response.py b/speechall/models/error_response.py
new file mode 100644
index 0000000..86fcf2c
--- /dev/null
+++ b/speechall/models/error_response.py
@@ -0,0 +1,83 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import Any, Dict
+from pydantic import BaseModel, Field, StrictStr
+
+class ErrorResponse(BaseModel):
+ """
+ Standard structure for error responses. May include additional properties depending on the error type. # noqa: E501
+ """
+ message: StrictStr = Field(default=..., description="A human-readable message describing the error.")
+ additional_properties: Dict[str, Any] = {}
+ __properties = ["message"]
+
+ class Config:
+ """Pydantic configuration"""
+ allow_population_by_field_name = True
+ validate_assignment = True
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.dict(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> ErrorResponse:
+ """Create an instance of ErrorResponse from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self):
+ """Returns the dictionary representation of the model using alias"""
+ _dict = self.dict(by_alias=True,
+ exclude={
+ "additional_properties"
+ },
+ exclude_none=True)
+ # puts key-value pairs in additional_properties in the top level
+ if self.additional_properties is not None:
+ for _key, _value in self.additional_properties.items():
+ _dict[_key] = _value
+
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: dict) -> ErrorResponse:
+ """Create an instance of ErrorResponse from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return ErrorResponse.parse_obj(obj)
+
+ _obj = ErrorResponse.parse_obj({
+ "message": obj.get("message")
+ })
+ # store additional fields in additional_properties
+ for _key in obj.keys():
+ if _key not in cls.__properties:
+ _obj.additional_properties[_key] = obj.get(_key)
+
+ return _obj
+
+
diff --git a/speechall/models/exact_rule.py b/speechall/models/exact_rule.py
new file mode 100644
index 0000000..53111c2
--- /dev/null
+++ b/speechall/models/exact_rule.py
@@ -0,0 +1,84 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import Optional
+from pydantic import BaseModel, Field, StrictBool, StrictStr, validator
+
+class ExactRule(BaseModel):
+ """
+ Defines a replacement rule based on finding an exact string match. # noqa: E501
+ """
+ kind: StrictStr = Field(default=..., description="Discriminator field identifying the rule type as 'exact'.")
+ search: StrictStr = Field(default=..., description="The exact text string to search for within the transcription.")
+ replacement: StrictStr = Field(default=..., description="The text string to replace the found 'search' text with.")
+ case_sensitive: Optional[StrictBool] = Field(default=False, alias="caseSensitive", description="If true, the search will match only if the case is identical. If false (default), the search ignores case.")
+ __properties = ["kind", "search", "replacement", "caseSensitive"]
+
+ @validator('kind')
+ def kind_validate_enum(cls, value):
+ """Validates the enum"""
+ if value not in ('exact',):
+ raise ValueError("must be one of enum values ('exact')")
+ return value
+
+ class Config:
+ """Pydantic configuration"""
+ allow_population_by_field_name = True
+ validate_assignment = True
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.dict(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> ExactRule:
+ """Create an instance of ExactRule from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self):
+ """Returns the dictionary representation of the model using alias"""
+ _dict = self.dict(by_alias=True,
+ exclude={
+ },
+ exclude_none=True)
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: dict) -> ExactRule:
+ """Create an instance of ExactRule from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return ExactRule.parse_obj(obj)
+
+ _obj = ExactRule.parse_obj({
+ "kind": obj.get("kind"),
+ "search": obj.get("search"),
+ "replacement": obj.get("replacement"),
+ "case_sensitive": obj.get("caseSensitive") if obj.get("caseSensitive") is not None else False
+ })
+ return _obj
+
+
diff --git a/speechall/models/open_ai_create_translation_request_model.py b/speechall/models/open_ai_create_translation_request_model.py
new file mode 100644
index 0000000..613c7c5
--- /dev/null
+++ b/speechall/models/open_ai_create_translation_request_model.py
@@ -0,0 +1,139 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+from inspect import getfullargspec
+import json
+import pprint
+import re # noqa: F401
+
+from typing import Optional
+from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
+from typing import Union, Any, List, TYPE_CHECKING
+from pydantic import StrictStr, Field
+
+OPENAICREATETRANSLATIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"]
+
+class OpenAICreateTranslationRequestModel(BaseModel):
+ """
+ ID of the model to use. It follows the naming convention provider/model-name
+ """
+
+ # data type: str
+ anyof_schema_1_validator: Optional[StrictStr] = Field(default=None, description="A valid Speechall model identifier capable of translation.")
+ # data type: str
+ anyof_schema_2_validator: Optional[StrictStr] = None
+ if TYPE_CHECKING:
+ actual_instance: Union[str]
+ else:
+ actual_instance: Any
+ any_of_schemas: List[str] = Field(OPENAICREATETRANSLATIONREQUESTMODEL_ANY_OF_SCHEMAS, const=True)
+
+ class Config:
+ validate_assignment = True
+
+ def __init__(self, *args, **kwargs) -> None:
+ if args:
+ if len(args) > 1:
+ raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
+ if kwargs:
+ raise ValueError("If a position argument is used, keyword arguments cannot be used.")
+ super().__init__(actual_instance=args[0])
+ else:
+ super().__init__(**kwargs)
+
+ @validator('actual_instance')
+ def actual_instance_must_validate_anyof(cls, v):
+ instance = OpenAICreateTranslationRequestModel.construct()
+ error_messages = []
+ # validate data type: str
+ try:
+ instance.anyof_schema_1_validator = v
+ return v
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))
+ # validate data type: str
+ try:
+ instance.anyof_schema_2_validator = v
+ return v
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))
+ if error_messages:
+ # no match
+ raise ValueError("No match found when setting the actual_instance in OpenAICreateTranslationRequestModel with anyOf schemas: str. Details: " + ", ".join(error_messages))
+ else:
+ return v
+
+ @classmethod
+ def from_dict(cls, obj: dict) -> OpenAICreateTranslationRequestModel:
+ return cls.from_json(json.dumps(obj))
+
+ @classmethod
+ def from_json(cls, json_str: str) -> OpenAICreateTranslationRequestModel:
+ """Returns the object represented by the json string"""
+ instance = OpenAICreateTranslationRequestModel.construct()
+ error_messages = []
+ # deserialize data into str
+ try:
+ # validation
+ instance.anyof_schema_1_validator = json.loads(json_str)
+ # assign value to actual_instance
+ instance.actual_instance = instance.anyof_schema_1_validator
+ return instance
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))
+ # deserialize data into str
+ try:
+ # validation
+ instance.anyof_schema_2_validator = json.loads(json_str)
+ # assign value to actual_instance
+ instance.actual_instance = instance.anyof_schema_2_validator
+ return instance
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))
+
+ if error_messages:
+ # no match
+ raise ValueError("No match found when deserializing the JSON string into OpenAICreateTranslationRequestModel with anyOf schemas: str. Details: " + ", ".join(error_messages))
+ else:
+ return instance
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the actual instance"""
+ if self.actual_instance is None:
+ return "null"
+
+ to_json = getattr(self.actual_instance, "to_json", None)
+ if callable(to_json):
+ return self.actual_instance.to_json()
+ else:
+ return json.dumps(self.actual_instance)
+
+ def to_dict(self) -> dict:
+ """Returns the dict representation of the actual instance"""
+ if self.actual_instance is None:
+ return "null"
+
+ to_json = getattr(self.actual_instance, "to_json", None)
+ if callable(to_json):
+ return self.actual_instance.to_dict()
+ else:
+ # primitive type
+ return self.actual_instance
+
+ def to_str(self) -> str:
+ """Returns the string representation of the actual instance"""
+ return pprint.pformat(self.dict())
+
+
diff --git a/speechall/models/openai_compatible_create_transcription200_response.py b/speechall/models/openai_compatible_create_transcription200_response.py
new file mode 100644
index 0000000..ca0d08f
--- /dev/null
+++ b/speechall/models/openai_compatible_create_transcription200_response.py
@@ -0,0 +1,139 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+from inspect import getfullargspec
+import json
+import pprint
+import re  # noqa: F401
+
+from typing import Any, List, Optional, Union, TYPE_CHECKING
+from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
+from speechall.models.open_ai_create_transcription_response_json import OpenAICreateTranscriptionResponseJson
+from speechall.models.open_ai_create_transcription_response_verbose_json import OpenAICreateTranscriptionResponseVerboseJson
+
+OPENAICOMPATIBLECREATETRANSCRIPTION200RESPONSE_ONE_OF_SCHEMAS = ["OpenAICreateTranscriptionResponseJson", "OpenAICreateTranscriptionResponseVerboseJson"]
+
+class OpenaiCompatibleCreateTranscription200Response(BaseModel):
+    """
+    OpenaiCompatibleCreateTranscription200Response
+
+    oneOf wrapper: holds exactly one of the member schema payloads in `actual_instance`.
+    """
+    # data type: OpenAICreateTranscriptionResponseVerboseJson
+    oneof_schema_1_validator: Optional[OpenAICreateTranscriptionResponseVerboseJson] = None
+    # data type: OpenAICreateTranscriptionResponseJson
+    oneof_schema_2_validator: Optional[OpenAICreateTranscriptionResponseJson] = None
+    if TYPE_CHECKING:
+        actual_instance: Union[OpenAICreateTranscriptionResponseJson, OpenAICreateTranscriptionResponseVerboseJson]
+    else:
+        actual_instance: Any
+    # Constant list of the oneOf member schema names.
+    one_of_schemas: List[str] = Field(OPENAICOMPATIBLECREATETRANSCRIPTION200RESPONSE_ONE_OF_SCHEMAS, const=True)
+
+    class Config:
+        # Re-run validators on attribute assignment (relied on by from_json below).
+        validate_assignment = True
+
+    def __init__(self, *args, **kwargs) -> None:
+        """Accept one positional argument (stored as `actual_instance`) or keyword args, not both."""
+        if args:
+            if len(args) > 1:
+                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
+            if kwargs:
+                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
+            super().__init__(actual_instance=args[0])
+        else:
+            super().__init__(**kwargs)
+
+    @validator('actual_instance')
+    def actual_instance_must_validate_oneof(cls, v):
+        """Require `v` to be an instance of exactly one of the two member schema classes."""
+        # NOTE(review): the member classes are referenced at runtime here but do not
+        # appear in this file's visible import block — confirm they are in scope.
+        instance = OpenaiCompatibleCreateTranscription200Response.construct()
+        error_messages = []
+        match = 0
+        # validate data type: OpenAICreateTranscriptionResponseVerboseJson
+        if not isinstance(v, OpenAICreateTranscriptionResponseVerboseJson):
+            error_messages.append(f"Error! Input type `{type(v)}` is not `OpenAICreateTranscriptionResponseVerboseJson`")
+        else:
+            match += 1
+        # validate data type: OpenAICreateTranscriptionResponseJson
+        if not isinstance(v, OpenAICreateTranscriptionResponseJson):
+            error_messages.append(f"Error! Input type `{type(v)}` is not `OpenAICreateTranscriptionResponseJson`")
+        else:
+            match += 1
+        if match > 1:
+            # more than 1 match
+            raise ValueError("Multiple matches found when setting `actual_instance` in OpenaiCompatibleCreateTranscription200Response with oneOf schemas: OpenAICreateTranscriptionResponseJson, OpenAICreateTranscriptionResponseVerboseJson. Details: " + ", ".join(error_messages))
+        elif match == 0:
+            # no match
+            raise ValueError("No match found when setting `actual_instance` in OpenaiCompatibleCreateTranscription200Response with oneOf schemas: OpenAICreateTranscriptionResponseJson, OpenAICreateTranscriptionResponseVerboseJson. Details: " + ", ".join(error_messages))
+        else:
+            return v
+
+    @classmethod
+    def from_dict(cls, obj: dict) -> OpenaiCompatibleCreateTranscription200Response:
+        """Build an instance from a dict by round-tripping through JSON."""
+        return cls.from_json(json.dumps(obj))
+
+    @classmethod
+    def from_json(cls, json_str: str) -> OpenaiCompatibleCreateTranscription200Response:
+        """Returns the object represented by the json string"""
+        instance = OpenaiCompatibleCreateTranscription200Response.construct()
+        error_messages = []
+        match = 0
+
+        # Both deserializations are attempted; exactly one must succeed (oneOf).
+        # deserialize data into OpenAICreateTranscriptionResponseVerboseJson
+        try:
+            instance.actual_instance = OpenAICreateTranscriptionResponseVerboseJson.from_json(json_str)
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+        # deserialize data into OpenAICreateTranscriptionResponseJson
+        try:
+            instance.actual_instance = OpenAICreateTranscriptionResponseJson.from_json(json_str)
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+
+        if match > 1:
+            # more than 1 match
+            raise ValueError("Multiple matches found when deserializing the JSON string into OpenaiCompatibleCreateTranscription200Response with oneOf schemas: OpenAICreateTranscriptionResponseJson, OpenAICreateTranscriptionResponseVerboseJson. Details: " + ", ".join(error_messages))
+        elif match == 0:
+            # no match
+            raise ValueError("No match found when deserializing the JSON string into OpenaiCompatibleCreateTranscription200Response with oneOf schemas: OpenAICreateTranscriptionResponseJson, OpenAICreateTranscriptionResponseVerboseJson. Details: " + ", ".join(error_messages))
+        else:
+            return instance
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the actual instance"""
+        if self.actual_instance is None:
+            # JSON text for an unset instance.
+            return "null"
+
+        to_json = getattr(self.actual_instance, "to_json", None)
+        if callable(to_json):
+            return self.actual_instance.to_json()
+        else:
+            return json.dumps(self.actual_instance)
+
+    def to_dict(self) -> dict:
+        """Returns the dict representation of the actual instance"""
+        if self.actual_instance is None:
+            return None
+
+        to_dict = getattr(self.actual_instance, "to_dict", None)
+        if callable(to_dict):
+            return self.actual_instance.to_dict()
+        else:
+            # primitive type
+            return self.actual_instance
+
+    def to_str(self) -> str:
+        """Returns the string representation of the actual instance"""
+        return pprint.pformat(self.dict())
+
+
diff --git a/speechall/models/openai_compatible_create_translation200_response.py b/speechall/models/openai_compatible_create_translation200_response.py
new file mode 100644
index 0000000..7a7c3e1
--- /dev/null
+++ b/speechall/models/openai_compatible_create_translation200_response.py
@@ -0,0 +1,139 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+from inspect import getfullargspec
+import json
+import pprint
+import re  # noqa: F401
+
+from typing import Any, List, Optional, Union, TYPE_CHECKING
+from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
+from speechall.models.open_ai_create_translation_response_json import OpenAICreateTranslationResponseJson
+from speechall.models.open_ai_create_translation_response_verbose_json import OpenAICreateTranslationResponseVerboseJson
+
+OPENAICOMPATIBLECREATETRANSLATION200RESPONSE_ONE_OF_SCHEMAS = ["OpenAICreateTranslationResponseJson", "OpenAICreateTranslationResponseVerboseJson"]
+
+class OpenaiCompatibleCreateTranslation200Response(BaseModel):
+    """
+    OpenaiCompatibleCreateTranslation200Response
+
+    oneOf wrapper: holds exactly one of the member schema payloads in `actual_instance`.
+    """
+    # data type: OpenAICreateTranslationResponseVerboseJson
+    oneof_schema_1_validator: Optional[OpenAICreateTranslationResponseVerboseJson] = None
+    # data type: OpenAICreateTranslationResponseJson
+    oneof_schema_2_validator: Optional[OpenAICreateTranslationResponseJson] = None
+    if TYPE_CHECKING:
+        actual_instance: Union[OpenAICreateTranslationResponseJson, OpenAICreateTranslationResponseVerboseJson]
+    else:
+        actual_instance: Any
+    # Constant list of the oneOf member schema names.
+    one_of_schemas: List[str] = Field(OPENAICOMPATIBLECREATETRANSLATION200RESPONSE_ONE_OF_SCHEMAS, const=True)
+
+    class Config:
+        # Re-run validators on attribute assignment (relied on by from_json below).
+        validate_assignment = True
+
+    def __init__(self, *args, **kwargs) -> None:
+        """Accept one positional argument (stored as `actual_instance`) or keyword args, not both."""
+        if args:
+            if len(args) > 1:
+                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
+            if kwargs:
+                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
+            super().__init__(actual_instance=args[0])
+        else:
+            super().__init__(**kwargs)
+
+    @validator('actual_instance')
+    def actual_instance_must_validate_oneof(cls, v):
+        """Require `v` to be an instance of exactly one of the two member schema classes."""
+        # NOTE(review): the member classes are referenced at runtime here but do not
+        # appear in this file's visible import block — confirm they are in scope.
+        instance = OpenaiCompatibleCreateTranslation200Response.construct()
+        error_messages = []
+        match = 0
+        # validate data type: OpenAICreateTranslationResponseVerboseJson
+        if not isinstance(v, OpenAICreateTranslationResponseVerboseJson):
+            error_messages.append(f"Error! Input type `{type(v)}` is not `OpenAICreateTranslationResponseVerboseJson`")
+        else:
+            match += 1
+        # validate data type: OpenAICreateTranslationResponseJson
+        if not isinstance(v, OpenAICreateTranslationResponseJson):
+            error_messages.append(f"Error! Input type `{type(v)}` is not `OpenAICreateTranslationResponseJson`")
+        else:
+            match += 1
+        if match > 1:
+            # more than 1 match
+            raise ValueError("Multiple matches found when setting `actual_instance` in OpenaiCompatibleCreateTranslation200Response with oneOf schemas: OpenAICreateTranslationResponseJson, OpenAICreateTranslationResponseVerboseJson. Details: " + ", ".join(error_messages))
+        elif match == 0:
+            # no match
+            raise ValueError("No match found when setting `actual_instance` in OpenaiCompatibleCreateTranslation200Response with oneOf schemas: OpenAICreateTranslationResponseJson, OpenAICreateTranslationResponseVerboseJson. Details: " + ", ".join(error_messages))
+        else:
+            return v
+
+    @classmethod
+    def from_dict(cls, obj: dict) -> OpenaiCompatibleCreateTranslation200Response:
+        """Build an instance from a dict by round-tripping through JSON."""
+        return cls.from_json(json.dumps(obj))
+
+    @classmethod
+    def from_json(cls, json_str: str) -> OpenaiCompatibleCreateTranslation200Response:
+        """Returns the object represented by the json string"""
+        instance = OpenaiCompatibleCreateTranslation200Response.construct()
+        error_messages = []
+        match = 0
+
+        # Both deserializations are attempted; exactly one must succeed (oneOf).
+        # deserialize data into OpenAICreateTranslationResponseVerboseJson
+        try:
+            instance.actual_instance = OpenAICreateTranslationResponseVerboseJson.from_json(json_str)
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+        # deserialize data into OpenAICreateTranslationResponseJson
+        try:
+            instance.actual_instance = OpenAICreateTranslationResponseJson.from_json(json_str)
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+
+        if match > 1:
+            # more than 1 match
+            raise ValueError("Multiple matches found when deserializing the JSON string into OpenaiCompatibleCreateTranslation200Response with oneOf schemas: OpenAICreateTranslationResponseJson, OpenAICreateTranslationResponseVerboseJson. Details: " + ", ".join(error_messages))
+        elif match == 0:
+            # no match
+            raise ValueError("No match found when deserializing the JSON string into OpenaiCompatibleCreateTranslation200Response with oneOf schemas: OpenAICreateTranslationResponseJson, OpenAICreateTranslationResponseVerboseJson. Details: " + ", ".join(error_messages))
+        else:
+            return instance
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the actual instance"""
+        if self.actual_instance is None:
+            # JSON text for an unset instance.
+            return "null"
+
+        to_json = getattr(self.actual_instance, "to_json", None)
+        if callable(to_json):
+            return self.actual_instance.to_json()
+        else:
+            return json.dumps(self.actual_instance)
+
+    def to_dict(self) -> dict:
+        """Returns the dict representation of the actual instance"""
+        if self.actual_instance is None:
+            return None
+
+        to_dict = getattr(self.actual_instance, "to_dict", None)
+        if callable(to_dict):
+            return self.actual_instance.to_dict()
+        else:
+            # primitive type
+            return self.actual_instance
+
+    def to_str(self) -> str:
+        """Returns the string representation of the actual instance"""
+        return pprint.pformat(self.dict())
+
+
diff --git a/speechall/models/regex_group_rule.py b/speechall/models/regex_group_rule.py
new file mode 100644
index 0000000..07a077e
--- /dev/null
+++ b/speechall/models/regex_group_rule.py
@@ -0,0 +1,95 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import Dict, List, Optional
+from pydantic import BaseModel, Field, StrictStr, conlist, validator
+
+class RegexGroupRule(BaseModel):
+ """
+ Defines a replacement rule that uses regex capture groups to apply different replacements to different parts of the matched text. # noqa: E501
+ """
+ kind: StrictStr = Field(default=..., description="Discriminator field identifying the rule type as 'regex_group'.")
+ pattern: StrictStr = Field(default=..., description="The regular expression pattern containing capture groups `(...)`. The entire pattern must match for replacements to occur.")
+ group_replacements: Dict[str, StrictStr] = Field(default=..., alias="groupReplacements", description="An object where keys are capture group numbers (as strings, e.g., \"1\", \"2\") and values are the respective replacement strings for those groups. Groups not listed are kept as matched. The entire match is reconstructed using these replacements.")
+ flags: Optional[conlist(StrictStr)] = Field(default=None, description="An array of flags to modify the regex behavior.")
+ __properties = ["kind", "pattern", "groupReplacements", "flags"]
+
+ @validator('kind')
+ def kind_validate_enum(cls, value):
+ """Validates the enum"""
+ if value not in ('regex_group',):
+ raise ValueError("must be one of enum values ('regex_group')")
+ return value
+
+ @validator('flags')
+ def flags_validate_enum(cls, value):
+ """Validates the enum"""
+ if value is None:
+ return value
+
+ for i in value:
+ if i not in ('i', 'm', 's', 'x', 'u',):
+ raise ValueError("each list item must be one of ('i', 'm', 's', 'x', 'u')")
+ return value
+
+ class Config:
+ """Pydantic configuration"""
+ allow_population_by_field_name = True
+ validate_assignment = True
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.dict(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> RegexGroupRule:
+ """Create an instance of RegexGroupRule from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self):
+ """Returns the dictionary representation of the model using alias"""
+ _dict = self.dict(by_alias=True,
+ exclude={
+ },
+ exclude_none=True)
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: dict) -> RegexGroupRule:
+ """Create an instance of RegexGroupRule from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return RegexGroupRule.parse_obj(obj)
+
+ _obj = RegexGroupRule.parse_obj({
+ "kind": obj.get("kind"),
+ "pattern": obj.get("pattern"),
+ "group_replacements": obj.get("groupReplacements"),
+ "flags": obj.get("flags")
+ })
+ return _obj
+
+
diff --git a/speechall/models/regex_rule.py b/speechall/models/regex_rule.py
new file mode 100644
index 0000000..711cbce
--- /dev/null
+++ b/speechall/models/regex_rule.py
@@ -0,0 +1,95 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import List, Optional
+from pydantic import BaseModel, Field, StrictStr, conlist, validator
+
+class RegexRule(BaseModel):
+ """
+ Defines a replacement rule based on matching a regular expression pattern. # noqa: E501
+ """
+ kind: StrictStr = Field(default=..., description="Discriminator field identifying the rule type as 'regex'.")
+ pattern: StrictStr = Field(default=..., description="The regular expression pattern to search for. Uses standard regex syntax (implementation specific, often PCRE-like). Remember to escape special characters if needed (e.g., `\\\\.` for a literal dot).")
+ replacement: StrictStr = Field(default=..., description="The replacement text. Can include backreferences to capture groups from the pattern, like `$1`, `$2`, etc. A literal `$` should be escaped (e.g., `$$`).")
+ flags: Optional[conlist(StrictStr)] = Field(default=None, description="An array of flags to modify the regex behavior (e.g., 'i' for case-insensitivity).")
+ __properties = ["kind", "pattern", "replacement", "flags"]
+
+ @validator('kind')
+ def kind_validate_enum(cls, value):
+ """Validates the enum"""
+ if value not in ('regex',):
+ raise ValueError("must be one of enum values ('regex')")
+ return value
+
+ @validator('flags')
+ def flags_validate_enum(cls, value):
+ """Validates the enum"""
+ if value is None:
+ return value
+
+ for i in value:
+ if i not in ('i', 'm', 's', 'x', 'u',):
+ raise ValueError("each list item must be one of ('i', 'm', 's', 'x', 'u')")
+ return value
+
+ class Config:
+ """Pydantic configuration"""
+ allow_population_by_field_name = True
+ validate_assignment = True
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.dict(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> RegexRule:
+ """Create an instance of RegexRule from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self):
+ """Returns the dictionary representation of the model using alias"""
+ _dict = self.dict(by_alias=True,
+ exclude={
+ },
+ exclude_none=True)
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: dict) -> RegexRule:
+ """Create an instance of RegexRule from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return RegexRule.parse_obj(obj)
+
+ _obj = RegexRule.parse_obj({
+ "kind": obj.get("kind"),
+ "pattern": obj.get("pattern"),
+ "replacement": obj.get("replacement"),
+ "flags": obj.get("flags")
+ })
+ return _obj
+
+
diff --git a/speechall/models/remote_transcription_configuration.py b/speechall/models/remote_transcription_configuration.py
new file mode 100644
index 0000000..7406244
--- /dev/null
+++ b/speechall/models/remote_transcription_configuration.py
@@ -0,0 +1,118 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import List, Optional, Union
+from pydantic import BaseModel, Field, StrictBool, StrictStr, confloat, conint, conlist, validator
+from speechall.models.replacement_rule import ReplacementRule
+from speechall.models.transcript_language_code import TranscriptLanguageCode
+from speechall.models.transcript_output_format import TranscriptOutputFormat
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+
class RemoteTranscriptionConfiguration(BaseModel):
    """
    Configuration options for transcribing audio specified by a remote URL via the `/transcribe-remote` endpoint. # noqa: E501
    """
    # Required: which STT model to run, plus the URL the server must fetch.
    model: TranscriptionModelIdentifier = Field(...)
    language: Optional[TranscriptLanguageCode] = None
    output_format: Optional[TranscriptOutputFormat] = None
    ruleset_id: Optional[StrictStr] = Field(default=None, description="The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text.")
    punctuation: Optional[StrictBool] = Field(default=True, description="Whether to add punctuation. Support varies by model (e.g., Deepgram, AssemblyAI). Defaults to `true`.")
    timestamp_granularity: Optional[StrictStr] = Field(default='segment', description="Level of timestamp detail (`word` or `segment`). Defaults to `segment`.")
    diarization: Optional[StrictBool] = Field(default=False, description="Enable speaker diarization. Defaults to `false`.")
    initial_prompt: Optional[StrictStr] = Field(default=None, description="Optional text prompt to guide the transcription model. Support varies (e.g., OpenAI).")
    temperature: Optional[Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)]] = Field(default=None, description="Controls output randomness for supported models (e.g., OpenAI). Value between 0 and 1.")
    smart_format: Optional[StrictBool] = Field(default=None, description="Enable provider-specific smart formatting (e.g., Deepgram). Defaults vary.")
    speakers_expected: Optional[conint(strict=True, le=10, ge=1)] = Field(default=None, description="Hint for the number of expected speakers for diarization (e.g., RevAI, Deepgram).")
    custom_vocabulary: Optional[conlist(StrictStr)] = Field(default=None, description="List of custom words/phrases to improve recognition (e.g., Deepgram, AssemblyAI).")
    file_url: StrictStr = Field(default=..., description="The publicly accessible URL of the audio file to transcribe. The API server must be able to fetch the audio from this URL.")
    replacement_ruleset: Optional[conlist(ReplacementRule)] = Field(default=None, description="An array of replacement rules to be applied directly to this transcription request, in order. This allows defining rules inline instead of (or in addition to) using a pre-saved `ruleset_id`.")
    # JSON property names, in serialization order, consumed by to_dict/from_dict.
    __properties = ["model", "language", "output_format", "ruleset_id", "punctuation", "timestamp_granularity", "diarization", "initial_prompt", "temperature", "smart_format", "speakers_expected", "custom_vocabulary", "file_url", "replacement_ruleset"]

    @validator('timestamp_granularity')
    def timestamp_granularity_validate_enum(cls, value):
        """Validates the enum"""
        # None is permitted (the field default 'segment' is applied separately).
        if value is None:
            return value

        if value not in ('word', 'segment',):
            raise ValueError("must be one of enum values ('word', 'segment')")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both field names and aliases when populating the model.
        allow_population_by_field_name = True
        # Re-run validators when attributes are assigned after construction.
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> RemoteTranscriptionConfiguration:
        """Create an instance of RemoteTranscriptionConfiguration from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias.

        None-valued fields are dropped (exclude_none) and nested rules are
        serialized via their own to_dict().
        """
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of each item in replacement_ruleset (list)
        _items = []
        if self.replacement_ruleset:
            for _item in self.replacement_ruleset:
                if _item:
                    _items.append(_item.to_dict())
            _dict['replacement_ruleset'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> RemoteTranscriptionConfiguration:
        """Create an instance of RemoteTranscriptionConfiguration from a dict.

        Missing keys fall back to the documented defaults (punctuation=True,
        timestamp_granularity='segment', diarization=False); nested rule dicts
        are rebuilt through ReplacementRule.from_dict.
        """
        if obj is None:
            return None

        # Non-dict input (e.g. an already-built model) is delegated to pydantic.
        if not isinstance(obj, dict):
            return RemoteTranscriptionConfiguration.parse_obj(obj)

        _obj = RemoteTranscriptionConfiguration.parse_obj({
            "model": obj.get("model"),
            "language": obj.get("language"),
            "output_format": obj.get("output_format"),
            "ruleset_id": obj.get("ruleset_id"),
            "punctuation": obj.get("punctuation") if obj.get("punctuation") is not None else True,
            "timestamp_granularity": obj.get("timestamp_granularity") if obj.get("timestamp_granularity") is not None else 'segment',
            "diarization": obj.get("diarization") if obj.get("diarization") is not None else False,
            "initial_prompt": obj.get("initial_prompt"),
            "temperature": obj.get("temperature"),
            "smart_format": obj.get("smart_format"),
            "speakers_expected": obj.get("speakers_expected"),
            "custom_vocabulary": obj.get("custom_vocabulary"),
            "file_url": obj.get("file_url"),
            "replacement_ruleset": [ReplacementRule.from_dict(_item) for _item in obj.get("replacement_ruleset")] if obj.get("replacement_ruleset") is not None else None
        })
        return _obj
+
+
diff --git a/speechall/models/replacement_rule.py b/speechall/models/replacement_rule.py
new file mode 100644
index 0000000..fa2733f
--- /dev/null
+++ b/speechall/models/replacement_rule.py
@@ -0,0 +1,158 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+from inspect import getfullargspec
+import json
+import pprint
+import re # noqa: F401
+
+from typing import Any, List, Optional
+from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
+from speechall.models.exact_rule import ExactRule
+from speechall.models.regex_group_rule import RegexGroupRule
+from speechall.models.regex_rule import RegexRule
+from typing import Union, Any, List, TYPE_CHECKING
+from pydantic import StrictStr, Field
+
+REPLACEMENTRULE_ONE_OF_SCHEMAS = ["ExactRule", "RegexGroupRule", "RegexRule"]
+
class ReplacementRule(BaseModel):
    """
    Defines a single rule for finding and replacing text in a transcription. Use one of the specific rule types (`ExactRule`, `RegexRule`, `RegexGroupRule`). The `kind` property acts as a discriminator.

    This is a oneOf wrapper: exactly one concrete rule instance is held in
    `actual_instance`; the `oneof_schema_*_validator` fields only describe the
    candidate schemas and are not populated directly.
    """
    # data type: ExactRule
    oneof_schema_1_validator: Optional[ExactRule] = None
    # data type: RegexRule
    oneof_schema_2_validator: Optional[RegexRule] = None
    # data type: RegexGroupRule
    oneof_schema_3_validator: Optional[RegexGroupRule] = None
    if TYPE_CHECKING:
        actual_instance: Union[ExactRule, RegexGroupRule, RegexRule]
    else:
        actual_instance: Any
    one_of_schemas: List[str] = Field(REPLACEMENTRULE_ONE_OF_SCHEMAS, const=True)

    class Config:
        # Re-run the oneOf validator when actual_instance is reassigned.
        validate_assignment = True

    discriminator_value_class_map = {
    }

    def __init__(self, *args, **kwargs) -> None:
        """Accept either a single positional value for `actual_instance` or
        keyword arguments — never both."""
        if args:
            if len(args) > 1:
                raise ValueError("If a positional argument is used, only 1 is allowed to set `actual_instance`")
            if kwargs:
                raise ValueError("If a positional argument is used, keyword arguments cannot be used.")
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @validator('actual_instance')
    def actual_instance_must_validate_oneof(cls, v):
        """Require `v` to be an instance of exactly one of the oneOf classes."""
        error_messages = []
        match = 0
        # validate data type: ExactRule
        if not isinstance(v, ExactRule):
            error_messages.append(f"Error! Input type `{type(v)}` is not `ExactRule`")
        else:
            match += 1
        # validate data type: RegexRule
        if not isinstance(v, RegexRule):
            error_messages.append(f"Error! Input type `{type(v)}` is not `RegexRule`")
        else:
            match += 1
        # validate data type: RegexGroupRule
        if not isinstance(v, RegexGroupRule):
            error_messages.append(f"Error! Input type `{type(v)}` is not `RegexGroupRule`")
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when setting `actual_instance` in ReplacementRule with oneOf schemas: ExactRule, RegexGroupRule, RegexRule. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when setting `actual_instance` in ReplacementRule with oneOf schemas: ExactRule, RegexGroupRule, RegexRule. Details: " + ", ".join(error_messages))
        else:
            return v

    @classmethod
    def from_dict(cls, obj: dict) -> ReplacementRule:
        """Create an instance of ReplacementRule from a dict (via its JSON form)."""
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> ReplacementRule:
        """Returns the object represented by the json string.

        Tries each oneOf schema in turn; exactly one must deserialize cleanly.
        """
        instance = ReplacementRule.construct()
        error_messages = []
        match = 0

        # deserialize data into ExactRule
        try:
            instance.actual_instance = ExactRule.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into RegexRule
        try:
            instance.actual_instance = RegexRule.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into RegexGroupRule
        try:
            instance.actual_instance = RegexGroupRule.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when deserializing the JSON string into ReplacementRule with oneOf schemas: ExactRule, RegexGroupRule, RegexRule. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when deserializing the JSON string into ReplacementRule with oneOf schemas: ExactRule, RegexGroupRule, RegexRule. Details: " + ", ".join(error_messages))
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        to_json = getattr(self.actual_instance, "to_json", None)
        if callable(to_json):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(self) -> Optional[dict]:
        """Returns the dict representation of the actual instance.

        Returns None when no actual instance is set.
        """
        if self.actual_instance is None:
            return None

        to_dict = getattr(self.actual_instance, "to_dict", None)
        if callable(to_dict):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.dict())
+
diff --git a/speechall/models/speech_to_text_model.py b/speechall/models/speech_to_text_model.py
new file mode 100644
index 0000000..059edcd
--- /dev/null
+++ b/speechall/models/speech_to_text_model.py
@@ -0,0 +1,294 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from datetime import date, datetime
+from typing import List, Optional, Union
+from pydantic import BaseModel, Field, StrictBool, StrictFloat, StrictInt, StrictStr, conlist, validator
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier
+from speechall.models.transcription_provider import TranscriptionProvider
+
class SpeechToTextModel(BaseModel):
    """
    Describes an available speech-to-text model, its provider, capabilities, and characteristics. # noqa: E501
    """
    id: TranscriptionModelIdentifier = Field(...)
    display_name: StrictStr = Field(default=..., description="A user-friendly name for the model.")
    provider: TranscriptionProvider = Field(...)
    description: Optional[StrictStr] = Field(default=None, description="A brief description of the model, its intended use case, or version notes.")
    cost_per_second_usd: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The cost per second of audio processed in USD.")
    is_available: StrictBool = Field(default=..., description="Indicates whether the model is currently available for use.")
    supported_languages: Optional[conlist(StrictStr)] = Field(default=None, description="A list of language codes (preferably BCP 47, e.g., \"en-US\", \"en-GB\", \"es-ES\") supported by this model. May include `auto` if automatic language detection is supported across multiple languages within a single audio file. ")
    punctuation: Optional[StrictBool] = Field(default=None, description="Indicates whether the model generally supports automatic punctuation insertion.")
    diarization: Optional[StrictBool] = Field(default=None, description="Indicates whether the model generally supports speaker diarization (identifying different speakers).")
    streamable: Optional[StrictBool] = Field(default=None, description="Indicates whether the model can be used for real-time streaming transcription via a WebSocket connection (if offered by Speechall).")
    real_time_factor: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="An approximate measure of processing speed for batch processing. Defined as (audio duration) / (processing time). A higher value means faster processing (e.g., RTF=2 means it processes 1 second of audio in 0.5 seconds). May not be available for all models or streaming scenarios. ")
    max_duration_seconds: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The maximum duration of a single audio file (in seconds) that the model can reliably process in one request. May vary by provider or plan.")
    max_file_size_bytes: Optional[StrictInt] = Field(default=None, description="The maximum size of a single audio file (in bytes) that can be uploaded for processing by this model. May vary by provider or plan.")
    version: Optional[StrictStr] = Field(default=None, description="The specific version identifier for the model.")
    release_date: Optional[date] = Field(default=None, description="The date when this specific version of the model was released or last updated.")
    model_type: Optional[StrictStr] = Field(default=None, description="The primary type or training domain of the model. Helps identify suitability for different audio types.")
    accuracy_tier: Optional[StrictStr] = Field(default=None, description="A general indication of the model's expected accuracy level relative to other models. Not a guaranteed metric.")
    supported_audio_encodings: Optional[conlist(StrictStr)] = Field(default=None, description="A list of audio encodings that this model supports or is optimized for (e.g., LINEAR16, FLAC, MP3, Opus).")
    supported_sample_rates: Optional[conlist(StrictInt)] = Field(default=None, description="A list of audio sample rates (in Hz) that this model supports or is optimized for.")
    speaker_labels: Optional[StrictBool] = Field(default=None, description="Indicates whether the model can provide speaker labels for the transcription.")
    word_timestamps: Optional[StrictBool] = Field(default=None, description="Indicates whether the model can provide timestamps for individual words.")
    confidence_scores: Optional[StrictBool] = Field(default=None, description="Indicates whether the model provides confidence scores for the transcription or individual words.")
    language_detection: Optional[StrictBool] = Field(default=None, description="Indicates whether the model supports automatic language detection for input audio.")
    custom_vocabulary_support: Optional[StrictBool] = Field(default=None, description="Indicates if the model can leverage a custom vocabulary or language model adaptation.")
    profanity_filtering: Optional[StrictBool] = Field(default=None, description="Indicates if the model supports filtering or masking of profanity.")
    noise_reduction: Optional[StrictBool] = Field(default=None, description="Indicates if the model supports noise reduction.")
    supports_srt: StrictBool = Field(default=..., description="Indicates whether the model supports SRT subtitle format output.")
    supports_vtt: StrictBool = Field(default=..., description="Indicates whether the model supports VTT subtitle format output.")
    voice_activity_detection: Optional[StrictBool] = Field(default=None, description="Indicates whether the model supports voice activity detection (VAD) to identify speech segments.")
    # JSON property names, in serialization order, consumed by to_dict/from_dict.
    __properties = ["id", "display_name", "provider", "description", "cost_per_second_usd", "is_available", "supported_languages", "punctuation", "diarization", "streamable", "real_time_factor", "max_duration_seconds", "max_file_size_bytes", "version", "release_date", "model_type", "accuracy_tier", "supported_audio_encodings", "supported_sample_rates", "speaker_labels", "word_timestamps", "confidence_scores", "language_detection", "custom_vocabulary_support", "profanity_filtering", "noise_reduction", "supports_srt", "supports_vtt", "voice_activity_detection"]

    @validator('model_type')
    def model_type_validate_enum(cls, value):
        """Validates the enum"""
        if value is None:
            return value

        if value not in ('general', 'phone_call', 'video', 'command_and_search', 'medical', 'legal', 'voicemail', 'meeting',):
            raise ValueError("must be one of enum values ('general', 'phone_call', 'video', 'command_and_search', 'medical', 'legal', 'voicemail', 'meeting')")
        return value

    @validator('accuracy_tier')
    def accuracy_tier_validate_enum(cls, value):
        """Validates the enum"""
        if value is None:
            return value

        if value not in ('basic', 'standard', 'enhanced', 'premium',):
            raise ValueError("must be one of enum values ('basic', 'standard', 'enhanced', 'premium')")
        return value

    @validator('release_date', pre=True)
    def parse_release_date(cls, value):
        """Coerce release_date strings in several common formats to a date.

        NOTE(review): an unparseable string is silently coerced to None rather
        than raising — confirm this lenient behavior is intended.
        """
        if value is None or isinstance(value, date):
            return value

        if isinstance(value, str):
            # Try common date formats
            date_formats = [
                '%Y-%m-%d',           # ISO format: 2023-12-25
                '%m/%d/%Y',           # US format: 12/25/2023
                '%d/%m/%Y',           # European format: 25/12/2023
                '%Y-%m-%dT%H:%M:%S',  # ISO datetime format
                '%Y-%m-%dT%H:%M:%SZ', # ISO datetime with Z
                '%Y-%m-%d %H:%M:%S',  # Space separated datetime
            ]

            for fmt in date_formats:
                try:
                    parsed_datetime = datetime.strptime(value, fmt)
                    return parsed_datetime.date()
                except ValueError:
                    continue

            # If no format works, return None to avoid errors
            return None

        return value

    class Config:
        """Pydantic configuration"""
        # Accept both field names and aliases when populating the model.
        allow_population_by_field_name = True
        # Re-run validators when attributes are assigned after construction.
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> SpeechToTextModel:
        """Create an instance of SpeechToTextModel from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias.

        exclude_none strips every None value, but the API distinguishes
        "absent" from "explicitly null": any nullable field the caller
        explicitly set to None is re-added with a null value.
        """
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        # Nullable fields: emit an explicit None when set to None by the caller
        # (i.e. the field name appears in __fields_set__).
        _nullable_fields = (
            "description", "cost_per_second_usd", "supported_languages",
            "punctuation", "diarization", "streamable", "real_time_factor",
            "max_duration_seconds", "max_file_size_bytes", "version",
            "release_date", "model_type", "accuracy_tier",
            "supported_audio_encodings", "supported_sample_rates",
            "speaker_labels", "word_timestamps", "confidence_scores",
            "language_detection", "custom_vocabulary_support",
            "profanity_filtering", "noise_reduction",
            "voice_activity_detection",
        )
        for _field in _nullable_fields:
            if getattr(self, _field) is None and _field in self.__fields_set__:
                _dict[_field] = None

        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> SpeechToTextModel:
        """Create an instance of SpeechToTextModel from a dict.

        Missing booleans for required capability flags default to
        is_available=True and supports_srt/supports_vtt=False.
        """
        if obj is None:
            return None

        # Non-dict input (e.g. an already-built model) is delegated to pydantic.
        if not isinstance(obj, dict):
            return SpeechToTextModel.parse_obj(obj)

        _obj = SpeechToTextModel.parse_obj({
            "id": obj.get("id"),
            "display_name": obj.get("display_name"),
            "provider": obj.get("provider"),
            "description": obj.get("description"),
            "cost_per_second_usd": obj.get("cost_per_second_usd"),
            "is_available": obj.get("is_available") if obj.get("is_available") is not None else True,
            "supported_languages": obj.get("supported_languages"),
            "punctuation": obj.get("punctuation"),
            "diarization": obj.get("diarization"),
            "streamable": obj.get("streamable"),
            "real_time_factor": obj.get("real_time_factor"),
            "max_duration_seconds": obj.get("max_duration_seconds"),
            "max_file_size_bytes": obj.get("max_file_size_bytes"),
            "version": obj.get("version"),
            "release_date": obj.get("release_date"),
            "model_type": obj.get("model_type"),
            "accuracy_tier": obj.get("accuracy_tier"),
            "supported_audio_encodings": obj.get("supported_audio_encodings"),
            "supported_sample_rates": obj.get("supported_sample_rates"),
            "speaker_labels": obj.get("speaker_labels"),
            "word_timestamps": obj.get("word_timestamps"),
            "confidence_scores": obj.get("confidence_scores"),
            "language_detection": obj.get("language_detection"),
            "custom_vocabulary_support": obj.get("custom_vocabulary_support"),
            "profanity_filtering": obj.get("profanity_filtering"),
            "noise_reduction": obj.get("noise_reduction"),
            "supports_srt": obj.get("supports_srt") if obj.get("supports_srt") is not None else False,
            "supports_vtt": obj.get("supports_vtt") if obj.get("supports_vtt") is not None else False,
            "voice_activity_detection": obj.get("voice_activity_detection")
        })
        return _obj
+
diff --git a/speechall/models/transcript_language_code.py b/speechall/models/transcript_language_code.py
new file mode 100644
index 0000000..05fc45c
--- /dev/null
+++ b/speechall/models/transcript_language_code.py
@@ -0,0 +1,141 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import json
+import pprint
+import re # noqa: F401
+from aenum import Enum, no_arg
+
+
+
+
+
class TranscriptLanguageCode(str, Enum):
    """
    The language code of the audio file, typically in ISO 639-1 format. Specifying the correct language improves transcription accuracy and speed. The special value `auto` can be used to request automatic language detection, if supported by the selected model. If omitted, the default language is English (`en`).
    """

    """
    allowed enum values
    """
    AUTO = 'auto'
    EN = 'en'
    EN_AU = 'en_au'
    EN_UK = 'en_uk'
    EN_US = 'en_us'
    AF = 'af'
    AM = 'am'
    AR = 'ar'
    AS = 'as'
    AZ = 'az'
    BA = 'ba'
    BE = 'be'
    BG = 'bg'
    BN = 'bn'
    BO = 'bo'
    BR = 'br'
    BS = 'bs'
    CA = 'ca'
    CS = 'cs'
    CY = 'cy'
    DA = 'da'
    DE = 'de'
    EL = 'el'
    ES = 'es'
    ET = 'et'
    EU = 'eu'
    FA = 'fa'
    FI = 'fi'
    FO = 'fo'
    FR = 'fr'
    GL = 'gl'
    GU = 'gu'
    HA = 'ha'
    HAW = 'haw'
    HE = 'he'
    HI = 'hi'
    HR = 'hr'
    HT = 'ht'
    HU = 'hu'
    HY = 'hy'
    ID = 'id'
    IS = 'is'
    IT = 'it'
    JA = 'ja'
    JW = 'jw'
    KA = 'ka'
    KK = 'kk'
    KM = 'km'
    KN = 'kn'
    KO = 'ko'
    LA = 'la'
    LB = 'lb'
    LN = 'ln'
    LO = 'lo'
    LT = 'lt'
    LV = 'lv'
    MG = 'mg'
    MI = 'mi'
    MK = 'mk'
    ML = 'ml'
    MN = 'mn'
    MR = 'mr'
    MS = 'ms'
    MT = 'mt'
    MY = 'my'
    NE = 'ne'
    NL = 'nl'
    NN = 'nn'
    # Fixed: the generator emitted `FALSE = 'false'` here because the
    # ISO 639-1 code for Norwegian ('no') was resolved as a YAML 1.1 boolean
    # when the OpenAPI spec was parsed (the "Norway problem").  The intended
    # member, alphabetically between 'nn' and 'oc', is:
    NO = 'no'
    OC = 'oc'
    PA = 'pa'
    PL = 'pl'
    PS = 'ps'
    PT = 'pt'
    RO = 'ro'
    RU = 'ru'
    SA = 'sa'
    SD = 'sd'
    SI = 'si'
    SK = 'sk'
    SL = 'sl'
    SN = 'sn'
    SO = 'so'
    SQ = 'sq'
    SR = 'sr'
    SU = 'su'
    SV = 'sv'
    SW = 'sw'
    TA = 'ta'
    TE = 'te'
    TG = 'tg'
    TH = 'th'
    TK = 'tk'
    TL = 'tl'
    TR = 'tr'
    TT = 'tt'
    UK = 'uk'
    UR = 'ur'
    UZ = 'uz'
    VI = 'vi'
    YI = 'yi'
    YO = 'yo'
    ZH = 'zh'

    @classmethod
    def from_json(cls, json_str: str) -> "TranscriptLanguageCode":
        """Create an instance of TranscriptLanguageCode from a JSON string.

        The return annotation is quoted: this module has no
        `from __future__ import annotations`, so a bare forward reference to
        the class name would raise NameError at import time.
        """
        return TranscriptLanguageCode(json.loads(json_str))
+
+
diff --git a/speechall/models/transcript_output_format.py b/speechall/models/transcript_output_format.py
new file mode 100644
index 0000000..2471f0e
--- /dev/null
+++ b/speechall/models/transcript_output_format.py
@@ -0,0 +1,43 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import json
+import pprint
+import re # noqa: F401
+from aenum import Enum, no_arg
+
+
+
+
+
class TranscriptOutputFormat(str, Enum):
    """
    Specifies the desired format of the transcription output. - `text`: Plain text containing the full transcription. - `json_text`: A simple JSON object containing the transcription ID and the full text (`TranscriptionOnlyText` schema). - `json`: A detailed JSON object including segments, timestamps (based on `timestamp_granularity`), language, and potentially speaker labels and provider metadata (`TranscriptionDetailed` schema). - `srt`: SubRip subtitle format (returned as plain text). - `vtt`: WebVTT subtitle format (returned as plain text).
    """

    """
    allowed enum values
    """
    TEXT = 'text'
    JSON_TEXT = 'json_text'
    JSON = 'json'
    SRT = 'srt'
    VTT = 'vtt'

    @classmethod
    def from_json(cls, json_str: str) -> "TranscriptOutputFormat":
        """Create an instance of TranscriptOutputFormat from a JSON string.

        The return annotation is quoted: this module has no
        `from __future__ import annotations`, so a bare forward reference to
        the class name would raise NameError at import time.
        """
        return TranscriptOutputFormat(json.loads(json_str))
+
+
diff --git a/speechall/models/transcription_detailed.py b/speechall/models/transcription_detailed.py
new file mode 100644
index 0000000..247692d
--- /dev/null
+++ b/speechall/models/transcription_detailed.py
@@ -0,0 +1,99 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import Any, Dict, List, Optional, Union
+from pydantic import BaseModel, Field, StrictFloat, StrictInt, StrictStr, conlist
+from speechall.models.transcription_segment import TranscriptionSegment
+from speechall.models.transcription_word import TranscriptionWord
+
class TranscriptionDetailed(BaseModel):
    """
    A detailed JSON response format containing the full text, detected language, duration, individual timed segments, and potentially speaker labels and provider-specific metadata. Returned when `output_format` is `json`. # noqa: E501
    """
    id: StrictStr = Field(default=..., description="A unique identifier for the transcription job/request.")
    text: StrictStr = Field(default=..., description="The full transcribed text as a single string.")
    language: Optional[StrictStr] = Field(default=None, description="The detected or specified language of the audio (ISO 639-1 code).")
    duration: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The total duration of the processed audio file in seconds. **Deprecated**: This property may be removed in future versions as duration analysis might occur asynchronously. Rely on segment end times for duration information if needed. ")
    segments: Optional[conlist(TranscriptionSegment)] = Field(default=None, description="An array of transcribed segments, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled.")
    words: Optional[conlist(TranscriptionWord)] = Field(default=None, description="An array of transcribed words, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled.")
    provider_metadata: Optional[Dict[str, Any]] = Field(default=None, description="An optional object containing additional metadata returned directly from the underlying STT provider. The structure of this object is provider-dependent.")
    # Ordered list of serialisable properties; used by generated tooling.
    __properties = ["id", "text", "language", "duration", "segments", "words", "provider_metadata"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Return a pretty-printed string form of the model (field aliases applied)."""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Serialise the model to a JSON string (field aliases applied)."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> TranscriptionDetailed:
        """Create an instance of TranscriptionDetailed from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Return the dict form of the model with aliases applied and None fields
        dropped; nested segment/word models are serialised via their own to_dict().
        """
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        # Replace pydantic's default nested serialisation with each item's to_dict().
        if self.segments:
            _dict['segments'] = [seg.to_dict() for seg in self.segments if seg]
        if self.words:
            _dict['words'] = [word.to_dict() for word in self.words if word]
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> TranscriptionDetailed:
        """Create an instance of TranscriptionDetailed from a dict.

        Returns None for None input; non-dict input is handed to parse_obj as-is.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return TranscriptionDetailed.parse_obj(obj)

        raw_segments = obj.get("segments")
        raw_words = obj.get("words")
        return TranscriptionDetailed.parse_obj({
            "id": obj.get("id"),
            "text": obj.get("text"),
            "language": obj.get("language"),
            "duration": obj.get("duration"),
            "segments": [TranscriptionSegment.from_dict(item) for item in raw_segments] if raw_segments is not None else None,
            "words": [TranscriptionWord.from_dict(item) for item in raw_words] if raw_words is not None else None,
            "provider_metadata": obj.get("provider_metadata")
        })
+
+
diff --git a/speechall/models/transcription_model_identifier.py b/speechall/models/transcription_model_identifier.py
new file mode 100644
index 0000000..46a3569
--- /dev/null
+++ b/speechall/models/transcription_model_identifier.py
@@ -0,0 +1,108 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import json
+import pprint
+import re # noqa: F401
+from aenum import Enum, no_arg
+
+
+
+
+
class TranscriptionModelIdentifier(str, Enum):
    """
    Unique identifier for a specific Speech-to-Text model, composed as `provider.model_name`. Used to select the engine for transcription.
    """

    # Allowed enum values (`provider.model_name`).
    AMAZON_DOT_TRANSCRIBE = 'amazon.transcribe'
    ASSEMBLYAI_DOT_BEST = 'assemblyai.best'
    ASSEMBLYAI_DOT_NANO = 'assemblyai.nano'
    ASSEMBLYAI_DOT_SLAM_MINUS_1 = 'assemblyai.slam-1'
    ASSEMBLYAI_DOT_UNIVERSAL = 'assemblyai.universal'
    AZURE_DOT_STANDARD = 'azure.standard'
    CLOUDFLARE_DOT_WHISPER = 'cloudflare.whisper'
    CLOUDFLARE_DOT_WHISPER_MINUS_LARGE_MINUS_V3_MINUS_TURBO = 'cloudflare.whisper-large-v3-turbo'
    CLOUDFLARE_DOT_WHISPER_MINUS_TINY_MINUS_EN = 'cloudflare.whisper-tiny-en'
    DEEPGRAM_DOT_BASE = 'deepgram.base'
    DEEPGRAM_DOT_BASE_MINUS_CONVERSATIONALAI = 'deepgram.base-conversationalai'
    DEEPGRAM_DOT_BASE_MINUS_FINANCE = 'deepgram.base-finance'
    DEEPGRAM_DOT_BASE_MINUS_GENERAL = 'deepgram.base-general'
    DEEPGRAM_DOT_BASE_MINUS_MEETING = 'deepgram.base-meeting'
    DEEPGRAM_DOT_BASE_MINUS_PHONECALL = 'deepgram.base-phonecall'
    DEEPGRAM_DOT_BASE_MINUS_VIDEO = 'deepgram.base-video'
    DEEPGRAM_DOT_BASE_MINUS_VOICEMAIL = 'deepgram.base-voicemail'
    DEEPGRAM_DOT_ENHANCED = 'deepgram.enhanced'
    DEEPGRAM_DOT_ENHANCED_MINUS_FINANCE = 'deepgram.enhanced-finance'
    DEEPGRAM_DOT_ENHANCED_MINUS_GENERAL = 'deepgram.enhanced-general'
    DEEPGRAM_DOT_ENHANCED_MINUS_MEETING = 'deepgram.enhanced-meeting'
    DEEPGRAM_DOT_ENHANCED_MINUS_PHONECALL = 'deepgram.enhanced-phonecall'
    DEEPGRAM_DOT_NOVA = 'deepgram.nova'
    DEEPGRAM_DOT_NOVA_MINUS_GENERAL = 'deepgram.nova-general'
    DEEPGRAM_DOT_NOVA_MINUS_PHONECALL = 'deepgram.nova-phonecall'
    DEEPGRAM_DOT_NOVA_MINUS_2 = 'deepgram.nova-2'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_ATC = 'deepgram.nova-2-atc'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_AUTOMOTIVE = 'deepgram.nova-2-automotive'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_CONVERSATIONALAI = 'deepgram.nova-2-conversationalai'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_DRIVETHRU = 'deepgram.nova-2-drivethru'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_FINANCE = 'deepgram.nova-2-finance'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_GENERAL = 'deepgram.nova-2-general'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_MEDICAL = 'deepgram.nova-2-medical'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_MEETING = 'deepgram.nova-2-meeting'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_PHONECALL = 'deepgram.nova-2-phonecall'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_VIDEO = 'deepgram.nova-2-video'
    DEEPGRAM_DOT_NOVA_MINUS_2_MINUS_VOICEMAIL = 'deepgram.nova-2-voicemail'
    DEEPGRAM_DOT_NOVA_MINUS_3 = 'deepgram.nova-3'
    DEEPGRAM_DOT_NOVA_MINUS_3_MINUS_GENERAL = 'deepgram.nova-3-general'
    DEEPGRAM_DOT_NOVA_MINUS_3_MINUS_MEDICAL = 'deepgram.nova-3-medical'
    DEEPGRAM_DOT_WHISPER = 'deepgram.whisper'
    DEEPGRAM_DOT_WHISPER_MINUS_BASE = 'deepgram.whisper-base'
    DEEPGRAM_DOT_WHISPER_MINUS_LARGE = 'deepgram.whisper-large'
    DEEPGRAM_DOT_WHISPER_MINUS_MEDIUM = 'deepgram.whisper-medium'
    DEEPGRAM_DOT_WHISPER_MINUS_SMALL = 'deepgram.whisper-small'
    DEEPGRAM_DOT_WHISPER_MINUS_TINY = 'deepgram.whisper-tiny'
    FALAI_DOT_ELEVENLABS_MINUS_SPEECH_MINUS_TO_MINUS_TEXT = 'falai.elevenlabs-speech-to-text'
    FALAI_DOT_SPEECH_MINUS_TO_MINUS_TEXT = 'falai.speech-to-text'
    FALAI_DOT_WHISPER = 'falai.whisper'
    FALAI_DOT_WIZPER = 'falai.wizper'
    FIREWORKSAI_DOT_WHISPER_MINUS_V3 = 'fireworksai.whisper-v3'
    FIREWORKSAI_DOT_WHISPER_MINUS_V3_MINUS_TURBO = 'fireworksai.whisper-v3-turbo'
    GLADIA_DOT_STANDARD = 'gladia.standard'
    GOOGLE_DOT_ENHANCED = 'google.enhanced'
    GOOGLE_DOT_STANDARD = 'google.standard'
    GEMINI_DOT_GEMINI_MINUS_2_DOT_5_MINUS_FLASH_MINUS_PREVIEW_MINUS_05_MINUS_20 = 'gemini.gemini-2.5-flash-preview-05-20'
    GEMINI_DOT_GEMINI_MINUS_2_DOT_5_MINUS_PRO_MINUS_PREVIEW_MINUS_06_MINUS_05 = 'gemini.gemini-2.5-pro-preview-06-05'
    GEMINI_DOT_GEMINI_MINUS_2_DOT_0_MINUS_FLASH = 'gemini.gemini-2.0-flash'
    GEMINI_DOT_GEMINI_MINUS_2_DOT_0_MINUS_FLASH_MINUS_LITE = 'gemini.gemini-2.0-flash-lite'
    GROQ_DOT_DISTIL_MINUS_WHISPER_MINUS_LARGE_MINUS_V3_MINUS_EN = 'groq.distil-whisper-large-v3-en'
    GROQ_DOT_WHISPER_MINUS_LARGE_MINUS_V3 = 'groq.whisper-large-v3'
    GROQ_DOT_WHISPER_MINUS_LARGE_MINUS_V3_MINUS_TURBO = 'groq.whisper-large-v3-turbo'
    IBM_DOT_STANDARD = 'ibm.standard'
    OPENAI_DOT_WHISPER_MINUS_1 = 'openai.whisper-1'
    OPENAI_DOT_GPT_MINUS_4O_MINUS_TRANSCRIBE = 'openai.gpt-4o-transcribe'
    OPENAI_DOT_GPT_MINUS_4O_MINUS_MINI_MINUS_TRANSCRIBE = 'openai.gpt-4o-mini-transcribe'
    REVAI_DOT_MACHINE = 'revai.machine'
    REVAI_DOT_FUSION = 'revai.fusion'
    SPEECHMATICS_DOT_ENHANCED = 'speechmatics.enhanced'
    SPEECHMATICS_DOT_STANDARD = 'speechmatics.standard'

    @classmethod
    def from_json(cls, json_str: str) -> "TranscriptionModelIdentifier":
        """Create an instance of TranscriptionModelIdentifier from a JSON string.

        `json_str` must be a JSON-encoded string value; raises ValueError for
        values that are not a known model identifier.

        NOTE: the annotation is quoted because this module does not import
        `from __future__ import annotations`; an unquoted reference to the
        class name inside its own body is a NameError at import time.
        """
        return cls(json.loads(json_str))
+
+
diff --git a/speechall/models/transcription_only_text.py b/speechall/models/transcription_only_text.py
new file mode 100644
index 0000000..fb76b8f
--- /dev/null
+++ b/speechall/models/transcription_only_text.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+
+from pydantic import BaseModel, Field, StrictStr
+
class TranscriptionOnlyText(BaseModel):
    """
    A simplified JSON response format containing only the transcription ID and the full transcribed text. Returned when `output_format` is `json_text`. # noqa: E501
    """
    id: StrictStr = Field(default=..., description="A unique identifier for the transcription job/request.")
    text: StrictStr = Field(default=..., description="The full transcribed text as a single string.")
    # Ordered list of serialisable properties; used by generated tooling.
    __properties = ["id", "text"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Return a pretty-printed string form of the model (field aliases applied)."""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Serialise the model to a JSON string (field aliases applied)."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> TranscriptionOnlyText:
        """Create an instance of TranscriptionOnlyText from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Return the dict form of the model with aliases applied and None fields dropped."""
        return self.dict(by_alias=True,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> TranscriptionOnlyText:
        """Create an instance of TranscriptionOnlyText from a dict.

        Returns None for None input; non-dict input is handed to parse_obj as-is.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return TranscriptionOnlyText.parse_obj(obj)

        return TranscriptionOnlyText.parse_obj({
            "id": obj.get("id"),
            "text": obj.get("text")
        })
+
+
diff --git a/speechall/models/transcription_provider.py b/speechall/models/transcription_provider.py
new file mode 100644
index 0000000..ab093a7
--- /dev/null
+++ b/speechall/models/transcription_provider.py
@@ -0,0 +1,53 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import json
+import pprint
+import re # noqa: F401
+from aenum import Enum, no_arg
+
+
+
+
+
class TranscriptionProvider(str, Enum):
    """
    The identifier for the underlying Speech-to-Text service provider (e.g., 'openai', 'deepgram').
    """

    # Allowed enum values.
    AMAZON = 'amazon'
    ASSEMBLYAI = 'assemblyai'
    AZURE = 'azure'
    CLOUDFLARE = 'cloudflare'
    DEEPGRAM = 'deepgram'
    FALAI = 'falai'
    FIREWORKSAI = 'fireworksai'
    GEMINI = 'gemini'
    GLADIA = 'gladia'
    GOOGLE = 'google'
    GROQ = 'groq'
    IBM = 'ibm'
    OPENAI = 'openai'
    REVAI = 'revai'
    SPEECHMATICS = 'speechmatics'

    @classmethod
    def from_json(cls, json_str: str) -> "TranscriptionProvider":
        """Create an instance of TranscriptionProvider from a JSON string.

        `json_str` must be a JSON-encoded string value; raises ValueError for
        unknown provider names.

        NOTE: the annotation is quoted because this module does not import
        `from __future__ import annotations`; an unquoted reference to the
        class name inside its own body is a NameError at import time.
        """
        return cls(json.loads(json_str))
+
+
diff --git a/speechall/models/transcription_response.py b/speechall/models/transcription_response.py
new file mode 100644
index 0000000..1d02c67
--- /dev/null
+++ b/speechall/models/transcription_response.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+from inspect import getfullargspec
+import json
+import pprint
+import re # noqa: F401
+
+from typing import Any, List, Optional
+from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
+from speechall.models.transcription_detailed import TranscriptionDetailed
+from speechall.models.transcription_only_text import TranscriptionOnlyText
+from typing import Union, Any, List, TYPE_CHECKING
+from pydantic import StrictStr, Field
+
+TRANSCRIPTIONRESPONSE_ONE_OF_SCHEMAS = ["TranscriptionDetailed", "TranscriptionOnlyText"]
+
class TranscriptionResponse(BaseModel):
    """
    Represents the JSON structure returned when a JSON-based `output_format` (`json` or `json_text`) is requested. It can be either a detailed structure or a simple text-only structure.
    """
    # data type: TranscriptionDetailed
    oneof_schema_1_validator: Optional[TranscriptionDetailed] = None
    # data type: TranscriptionOnlyText
    oneof_schema_2_validator: Optional[TranscriptionOnlyText] = None
    if TYPE_CHECKING:
        actual_instance: Union[TranscriptionDetailed, TranscriptionOnlyText]
    else:
        actual_instance: Any
    one_of_schemas: List[str] = Field(TRANSCRIPTIONRESPONSE_ONE_OF_SCHEMAS, const=True)

    class Config:
        validate_assignment = True

    def __init__(self, *args, **kwargs) -> None:
        """Accept either one positional argument (the `actual_instance`) or
        keyword arguments, but never both."""
        if args:
            if len(args) > 1:
                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
            if kwargs:
                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @validator('actual_instance')
    def actual_instance_must_validate_oneof(cls, v):
        """Reject any `actual_instance` that is not one of the oneOf schema types."""
        if isinstance(v, (TranscriptionDetailed, TranscriptionOnlyText)):
            return v

        # If not an instance of either expected type, raise error
        error_messages = [
            f"Error! Input type `{type(v)}` is not `TranscriptionDetailed`",
            f"Error! Input type `{type(v)}` is not `TranscriptionOnlyText`"
        ]
        raise ValueError("No match found when setting `actual_instance` in TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText. Details: " + ", ".join(error_messages))

    @classmethod
    def from_dict(cls, obj: dict) -> TranscriptionResponse:
        """Create an instance from a dict by round-tripping through JSON."""
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> TranscriptionResponse:
        """Returns the object represented by the json string.

        Strategy: validate JSON syntax first, then try the richer
        `TranscriptionDetailed` schema (its optional fields make it a superset
        of `TranscriptionOnlyText`), falling back to `TranscriptionOnlyText`.

        FIX: the previous implementation attempted `TranscriptionDetailed`
        twice (once behind a field-sniffing check, then again
        unconditionally), duplicating work and error messages, and the
        field sniff (`key in json_obj`) raised TypeError for scalar JSON
        payloads. A single ordered attempt has the same outcomes.
        """
        instance = TranscriptionResponse.construct()
        error_messages = []

        # Fail fast with a clear error on malformed JSON.
        try:
            json.loads(json_str)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON: {str(e)}")

        # Try the detailed schema first.
        try:
            instance.actual_instance = TranscriptionDetailed.from_json(json_str)
            return instance
        except (ValidationError, ValueError) as e:
            error_messages.append(f"TranscriptionDetailed validation failed: {str(e)}")

        # Fall back to the text-only schema.
        try:
            instance.actual_instance = TranscriptionOnlyText.from_json(json_str)
            return instance
        except (ValidationError, ValueError) as e:
            error_messages.append(f"TranscriptionOnlyText validation failed: {str(e)}")

        # Neither schema matched.
        raise ValueError("No match found when deserializing the JSON string into TranscriptionResponse with oneOf schemas: TranscriptionDetailed, TranscriptionOnlyText. Details: " + ", ".join(error_messages))

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance."""
        if self.actual_instance is None:
            return "null"

        to_json = getattr(self.actual_instance, "to_json", None)
        if callable(to_json):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(self) -> dict:
        """Returns the dict representation of the actual instance."""
        if self.actual_instance is None:
            return None

        to_dict = getattr(self.actual_instance, "to_dict", None)
        if callable(to_dict):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance."""
        return pprint.pformat(self.dict())
+
+
diff --git a/speechall/models/transcription_segment.py b/speechall/models/transcription_segment.py
new file mode 100644
index 0000000..4724a4b
--- /dev/null
+++ b/speechall/models/transcription_segment.py
@@ -0,0 +1,79 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import Optional, Union
+from pydantic import BaseModel, Field, StrictFloat, StrictInt, StrictStr
+
class TranscriptionSegment(BaseModel):
    """
    Represents a time-coded segment of the transcription, typically corresponding to a phrase, sentence, or speaker turn. # noqa: E501
    """
    start: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The start time of the segment in seconds from the beginning of the audio.")
    end: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The end time of the segment in seconds from the beginning of the audio.")
    text: Optional[StrictStr] = Field(default=None, description="The transcribed text content of this segment.")
    speaker: Optional[StrictStr] = Field(default=None, description="An identifier for the speaker of this segment, present if diarization was enabled and successful.")
    confidence: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The model's confidence score for the transcription of this segment, typically between 0 and 1 (if provided by the model).")
    # Ordered list of serialisable properties; used by generated tooling.
    __properties = ["start", "end", "text", "speaker", "confidence"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Return a pretty-printed string form of the model (field aliases applied)."""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Serialise the model to a JSON string (field aliases applied)."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> TranscriptionSegment:
        """Create an instance of TranscriptionSegment from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Return the dict form of the model with aliases applied and None fields dropped."""
        return self.dict(by_alias=True,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> TranscriptionSegment:
        """Create an instance of TranscriptionSegment from a dict.

        Returns None for None input; non-dict input is handed to parse_obj as-is.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return TranscriptionSegment.parse_obj(obj)

        return TranscriptionSegment.parse_obj({
            "start": obj.get("start"),
            "end": obj.get("end"),
            "text": obj.get("text"),
            "speaker": obj.get("speaker"),
            "confidence": obj.get("confidence")
        })
+
+
diff --git a/speechall/models/transcription_word.py b/speechall/models/transcription_word.py
new file mode 100644
index 0000000..ed8833f
--- /dev/null
+++ b/speechall/models/transcription_word.py
@@ -0,0 +1,79 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+
+from typing import Optional, Union
+from pydantic import BaseModel, Field, StrictFloat, StrictInt, StrictStr
+
class TranscriptionWord(BaseModel):
    """
    Represents a word in the transcription, providing time-coded chunks of the transcription. # noqa: E501
    """
    start: Union[StrictFloat, StrictInt] = Field(default=..., description="The start time of the word in seconds from the beginning of the audio.")
    end: Union[StrictFloat, StrictInt] = Field(default=..., description="The end time of the word in seconds from the beginning of the audio.")
    word: StrictStr = Field(default=..., description="The transcribed word.")
    speaker: Optional[StrictStr] = Field(default=None, description="An identifier for the speaker of this word, present if diarization was enabled and successful.")
    confidence: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="The model's confidence score for the transcription of this word, typically between 0 and 1 (if provided by the model).")
    # Ordered list of serialisable properties; used by generated tooling.
    __properties = ["start", "end", "word", "speaker", "confidence"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Return a pretty-printed string form of the model (field aliases applied)."""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Serialise the model to a JSON string (field aliases applied)."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> TranscriptionWord:
        """Create an instance of TranscriptionWord from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Return the dict form of the model with aliases applied and None fields dropped."""
        return self.dict(by_alias=True,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> TranscriptionWord:
        """Create an instance of TranscriptionWord from a dict.

        Returns None for None input; non-dict input is handed to parse_obj as-is.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return TranscriptionWord.parse_obj(obj)

        return TranscriptionWord.parse_obj({
            "start": obj.get("start"),
            "end": obj.get("end"),
            "word": obj.get("word"),
            "speaker": obj.get("speaker"),
            "confidence": obj.get("confidence")
        })
+
+
diff --git a/speechall/py.typed b/speechall/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/speechall/rest.py b/speechall/rest.py
new file mode 100644
index 0000000..d072479
--- /dev/null
+++ b/speechall/rest.py
@@ -0,0 +1,329 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import io
+import json
+import logging
+import re
+import ssl
+
+from urllib.parse import urlencode, quote_plus
+import urllib3
+
+from speechall.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError, BadRequestException
+
+
# Module-level logger; used below to emit response-body debug records.
logger = logging.getLogger(__name__)

# URL schemes that must be routed through urllib3's SOCKSProxyManager
# rather than a plain HTTP ProxyManager.
SUPPORTED_SOCKS_PROXIES = {"socks5", "socks5h", "socks4", "socks4a"}
+
+
def is_socks_proxy_url(url):
    """Return True when *url* uses a SOCKS proxy scheme (socks4/4a/5/5h)."""
    if url is None:
        return False
    scheme, separator, _remainder = url.partition("://")
    if not separator:
        # No "://" present, so there is no scheme to inspect.
        return False
    return scheme.lower() in SUPPORTED_SOCKS_PROXIES
+
+
class RESTResponse(io.IOBase):
    """Thin adapter over a urllib3 response object.

    Exposes the status, reason and body directly, plus header accessors
    mirroring the http.client response interface.
    """

    def __init__(self, resp) -> None:
        """Capture the urllib3 response and snapshot its key attributes."""
        self.urllib3_response = resp
        self.data = resp.data
        self.reason = resp.reason
        self.status = resp.status

    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.headers

    def getheader(self, name, default=None):
        """Returns a given response header."""
        headers = self.urllib3_response.headers
        return headers.get(name, default)
+
+
class RESTClientObject:
    """Synchronous HTTP client used by the generated API classes.

    Builds a urllib3 pool manager (plain, HTTP proxy, or SOCKS proxy,
    depending on the configuration) and maps non-2xx HTTP statuses onto
    the package's typed ApiException subclasses.
    """

    def __init__(self, configuration, pools_size=4, maxsize=None) -> None:
        """Create the underlying urllib3 pool manager.

        :param configuration: Configuration object supplying TLS, proxy,
            retry and connection-pool settings.
        :param pools_size: number of connection pools to cache
            (urllib3 ``num_pools``).
        :param maxsize: maximum parallel connections per host; defaults to
            ``configuration.connection_pool_maxsize``, then 4.
        """
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75  # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680  # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel  # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html  # noqa: E501

        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE

        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname  # noqa: E501

        if configuration.retries is not None:
            addition_pool_args['retries'] = configuration.retries

        if configuration.tls_server_name:
            # SNI hostname override for the TLS handshake.
            addition_pool_args['server_hostname'] = configuration.tls_server_name

        if configuration.socket_options is not None:
            addition_pool_args['socket_options'] = configuration.socket_options

        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4

        # https pool manager
        if configuration.proxy:
            if is_socks_proxy_url(configuration.proxy):
                # Imported lazily so the socks extra is only required when a
                # SOCKS proxy is actually configured.
                from urllib3.contrib.socks import SOCKSProxyManager
                self.pool_manager = SOCKSProxyManager(
                    cert_reqs=cert_reqs,
                    ca_certs=configuration.ssl_ca_cert,
                    cert_file=configuration.cert_file,
                    key_file=configuration.key_file,
                    proxy_url=configuration.proxy,
                    headers=configuration.proxy_headers,
                    **addition_pool_args
                )
            else:
                self.pool_manager = urllib3.ProxyManager(
                    num_pools=pools_size,
                    maxsize=maxsize,
                    cert_reqs=cert_reqs,
                    ca_certs=configuration.ssl_ca_cert,
                    cert_file=configuration.cert_file,
                    key_file=configuration.key_file,
                    proxy_url=configuration.proxy,
                    proxy_headers=configuration.proxy_headers,
                    **addition_pool_args
                )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )

    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None, _preload_content=True,
                _request_timeout=None):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']

        if post_params and body:
            # The two body encodings are mutually exclusive.
            raise ApiValueError(
                "body parameter cannot be used with post_params parameter."
            )

        post_params = post_params or {}
        headers = headers or {}
        # url already contains the URL query string
        # so reset query_params to empty dict
        query_params = {}

        timeout = None
        if _request_timeout:
            if isinstance(_request_timeout, (int, float)):  # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif (isinstance(_request_timeout, tuple) and
                    len(_request_timeout) == 2):
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1])

        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:

                # no content type provided or payload is json
                if not headers.get('Content-Type') or re.search('json', headers['Content-Type'], re.IGNORECASE):
                    request_body = None
                    if body is not None:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'application/x-www-form-urlencoded':  # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    # Ensures that dict objects are serialized
                    post_params = [(a, json.dumps(b)) if isinstance(b, dict) else (a, b) for a, b in post_params]
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields={},
                                              preload_content=_preload_content,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            # Surface TLS failures as ApiException with status 0 (no response).
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)

        if _preload_content:
            r = RESTResponse(r)

            # log response body
            logger.debug("response body: %s", r.data)

        # Map non-2xx statuses to the package's typed exceptions.
        if not 200 <= r.status <= 299:
            if r.status == 400:
                raise BadRequestException(http_resp=r)

            if r.status == 401:
                raise UnauthorizedException(http_resp=r)

            if r.status == 403:
                raise ForbiddenException(http_resp=r)

            if r.status == 404:
                raise NotFoundException(http_resp=r)

            if 500 <= r.status <= 599:
                raise ServiceException(http_resp=r)

            raise ApiException(http_resp=r)

        return r

    def get_request(self, url, headers=None, query_params=None, _preload_content=True,
                    _request_timeout=None):
        """Issue a GET request (see :meth:`request` for parameter semantics)."""
        return self.request("GET", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)

    def head_request(self, url, headers=None, query_params=None, _preload_content=True,
                     _request_timeout=None):
        """Issue a HEAD request (see :meth:`request` for parameter semantics)."""
        return self.request("HEAD", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)

    def options_request(self, url, headers=None, query_params=None, post_params=None,
                        body=None, _preload_content=True, _request_timeout=None):
        """Issue an OPTIONS request (see :meth:`request` for parameter semantics)."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def delete_request(self, url, headers=None, query_params=None, body=None,
                       _preload_content=True, _request_timeout=None):
        """Issue a DELETE request (see :meth:`request` for parameter semantics)."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def post_request(self, url, headers=None, query_params=None, post_params=None,
                     body=None, _preload_content=True, _request_timeout=None):
        """Issue a POST request (see :meth:`request` for parameter semantics)."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def put_request(self, url, headers=None, query_params=None, post_params=None,
                    body=None, _preload_content=True, _request_timeout=None):
        """Issue a PUT request (see :meth:`request` for parameter semantics)."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def patch_request(self, url, headers=None, query_params=None, post_params=None,
                      body=None, _preload_content=True, _request_timeout=None):
        """Issue a PATCH request (see :meth:`request` for parameter semantics)."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..3a0d0b9
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,3 @@
+pytest~=7.1.3
+pytest-cov>=2.8.1
+pytest-randomly>=3.12.0
diff --git a/test/__init__.py b/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/test/test_base_transcription_configuration.py b/test/test_base_transcription_configuration.py
new file mode 100644
index 0000000..9b43036
--- /dev/null
+++ b/test/test_base_transcription_configuration.py
@@ -0,0 +1,64 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.base_transcription_configuration import BaseTranscriptionConfiguration # noqa: E501
+
class TestBaseTranscriptionConfiguration(unittest.TestCase):
    """BaseTranscriptionConfiguration unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> BaseTranscriptionConfiguration:
        """Test BaseTranscriptionConfiguration
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `BaseTranscriptionConfiguration`
        """
        model = BaseTranscriptionConfiguration() # noqa: E501
        if include_optional:
            return BaseTranscriptionConfiguration(
                model = 'openai.whisper-1',
                language = 'en',
                output_format = 'text',
                ruleset_id = '',
                punctuation = True,
                timestamp_granularity = 'segment',
                diarization = True,
                initial_prompt = '',
                temperature = 0,
                smart_format = True,
                speakers_expected = 1,
                custom_vocabulary = ["Speechall","Actondon","HIPAA"]
            )
        else:
            return BaseTranscriptionConfiguration(
                model = 'openai.whisper-1',
        )
        """

    def testBaseTranscriptionConfiguration(self):
        """Test BaseTranscriptionConfiguration"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_create_replacement_ruleset201_response.py b/test/test_create_replacement_ruleset201_response.py
new file mode 100644
index 0000000..f0c7dc2
--- /dev/null
+++ b/test/test_create_replacement_ruleset201_response.py
@@ -0,0 +1,53 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.create_replacement_ruleset201_response import CreateReplacementRuleset201Response # noqa: E501
+
class TestCreateReplacementRuleset201Response(unittest.TestCase):
    """CreateReplacementRuleset201Response unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> CreateReplacementRuleset201Response:
        """Test CreateReplacementRuleset201Response
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `CreateReplacementRuleset201Response`
        """
        model = CreateReplacementRuleset201Response() # noqa: E501
        if include_optional:
            return CreateReplacementRuleset201Response(
                id = 'f47ac10b-58cc-4372-a567-0e02b2c3d479'
            )
        else:
            return CreateReplacementRuleset201Response(
                id = 'f47ac10b-58cc-4372-a567-0e02b2c3d479',
        )
        """

    def testCreateReplacementRuleset201Response(self):
        """Test CreateReplacementRuleset201Response"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_create_replacement_ruleset_request.py b/test/test_create_replacement_ruleset_request.py
new file mode 100644
index 0000000..aa94630
--- /dev/null
+++ b/test/test_create_replacement_ruleset_request.py
@@ -0,0 +1,59 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.create_replacement_ruleset_request import CreateReplacementRulesetRequest # noqa: E501
+
class TestCreateReplacementRulesetRequest(unittest.TestCase):
    """CreateReplacementRulesetRequest unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> CreateReplacementRulesetRequest:
        """Test CreateReplacementRulesetRequest
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `CreateReplacementRulesetRequest`
        """
        model = CreateReplacementRulesetRequest() # noqa: E501
        if include_optional:
            return CreateReplacementRulesetRequest(
                name = 'Redact PII',
                rules = [
                    {"kind":"regex","pattern":"confidential project (\\w+)","replacement":"confidential project [REDACTED]","flags":["i"]}
                    ]
            )
        else:
            return CreateReplacementRulesetRequest(
                name = 'Redact PII',
                rules = [
                    {"kind":"regex","pattern":"confidential project (\\w+)","replacement":"confidential project [REDACTED]","flags":["i"]}
                    ],
        )
        """

    def testCreateReplacementRulesetRequest(self):
        """Test CreateReplacementRulesetRequest"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_error_response.py b/test/test_error_response.py
new file mode 100644
index 0000000..985fb8e
--- /dev/null
+++ b/test/test_error_response.py
@@ -0,0 +1,53 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.error_response import ErrorResponse # noqa: E501
+
class TestErrorResponse(unittest.TestCase):
    """ErrorResponse unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> ErrorResponse:
        """Test ErrorResponse
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `ErrorResponse`
        """
        model = ErrorResponse() # noqa: E501
        if include_optional:
            return ErrorResponse(
                message = ''
            )
        else:
            return ErrorResponse(
                message = '',
        )
        """

    def testErrorResponse(self):
        """Test ErrorResponse"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_exact_rule.py b/test/test_exact_rule.py
new file mode 100644
index 0000000..50b7406
--- /dev/null
+++ b/test/test_exact_rule.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.exact_rule import ExactRule # noqa: E501
+
class TestExactRule(unittest.TestCase):
    """ExactRule unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> ExactRule:
        """Test ExactRule
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `ExactRule`
        """
        model = ExactRule() # noqa: E501
        if include_optional:
            return ExactRule(
                kind = 'exact',
                search = 'customer X',
                replacement = '[REDACTED CUSTOMER NAME]',
                case_sensitive = True
            )
        else:
            return ExactRule(
                kind = 'exact',
                search = 'customer X',
                replacement = '[REDACTED CUSTOMER NAME]',
        )
        """

    def testExactRule(self):
        """Test ExactRule"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_open_ai_create_translation_request_model.py b/test/test_open_ai_create_translation_request_model.py
new file mode 100644
index 0000000..aa20ad5
--- /dev/null
+++ b/test/test_open_ai_create_translation_request_model.py
@@ -0,0 +1,51 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.open_ai_create_translation_request_model import OpenAICreateTranslationRequestModel # noqa: E501
+
class TestOpenAICreateTranslationRequestModel(unittest.TestCase):
    """OpenAICreateTranslationRequestModel unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> OpenAICreateTranslationRequestModel:
        """Test OpenAICreateTranslationRequestModel
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `OpenAICreateTranslationRequestModel`
        """
        model = OpenAICreateTranslationRequestModel() # noqa: E501
        if include_optional:
            return OpenAICreateTranslationRequestModel(
            )
        else:
            return OpenAICreateTranslationRequestModel(
            )
        """

    def testOpenAICreateTranslationRequestModel(self):
        """Test OpenAICreateTranslationRequestModel"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_openai_compatible_create_transcription200_response.py b/test/test_openai_compatible_create_transcription200_response.py
new file mode 100644
index 0000000..3eaa784
--- /dev/null
+++ b/test/test_openai_compatible_create_transcription200_response.py
@@ -0,0 +1,78 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.openai_compatible_create_transcription200_response import OpenaiCompatibleCreateTranscription200Response # noqa: E501
+
class TestOpenaiCompatibleCreateTranscription200Response(unittest.TestCase):
    """OpenaiCompatibleCreateTranscription200Response unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> OpenaiCompatibleCreateTranscription200Response:
        """Test OpenaiCompatibleCreateTranscription200Response
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `OpenaiCompatibleCreateTranscription200Response`
        """
        model = OpenaiCompatibleCreateTranscription200Response() # noqa: E501
        if include_optional:
            return OpenaiCompatibleCreateTranscription200Response(
                language = '',
                duration = 1.337,
                text = '',
                words = [
                    speechall.models.open_ai_transcription_word.OpenAI_TranscriptionWord(
                        word = '',
                        start = 1.337,
                        end = 1.337, )
                    ],
                segments = [
                    speechall.models.open_ai_transcription_segment.OpenAI_TranscriptionSegment(
                        id = 56,
                        seek = 56,
                        start = 1.337,
                        end = 1.337,
                        text = '',
                        tokens = [
                            56
                            ],
                        temperature = 1.337,
                        avg_logprob = 1.337,
                        compression_ratio = 1.337,
                        no_speech_prob = 1.337, )
                    ]
            )
        else:
            return OpenaiCompatibleCreateTranscription200Response(
                language = '',
                duration = 1.337,
                text = '',
        )
        """

    def testOpenaiCompatibleCreateTranscription200Response(self):
        """Test OpenaiCompatibleCreateTranscription200Response"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_openai_compatible_create_translation200_response.py b/test/test_openai_compatible_create_translation200_response.py
new file mode 100644
index 0000000..6e1c26a
--- /dev/null
+++ b/test/test_openai_compatible_create_translation200_response.py
@@ -0,0 +1,72 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.openai_compatible_create_translation200_response import OpenaiCompatibleCreateTranslation200Response # noqa: E501
+
class TestOpenaiCompatibleCreateTranslation200Response(unittest.TestCase):
    """OpenaiCompatibleCreateTranslation200Response unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> OpenaiCompatibleCreateTranslation200Response:
        """Test OpenaiCompatibleCreateTranslation200Response
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `OpenaiCompatibleCreateTranslation200Response`
        """
        model = OpenaiCompatibleCreateTranslation200Response() # noqa: E501
        if include_optional:
            return OpenaiCompatibleCreateTranslation200Response(
                language = '',
                duration = '',
                text = '',
                segments = [
                    speechall.models.open_ai_transcription_segment.OpenAI_TranscriptionSegment(
                        id = 56,
                        seek = 56,
                        start = 1.337,
                        end = 1.337,
                        text = '',
                        tokens = [
                            56
                            ],
                        temperature = 1.337,
                        avg_logprob = 1.337,
                        compression_ratio = 1.337,
                        no_speech_prob = 1.337, )
                    ]
            )
        else:
            return OpenaiCompatibleCreateTranslation200Response(
                language = '',
                duration = '',
                text = '',
        )
        """

    def testOpenaiCompatibleCreateTranslation200Response(self):
        """Test OpenaiCompatibleCreateTranslation200Response"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_regex_group_rule.py b/test/test_regex_group_rule.py
new file mode 100644
index 0000000..c0f231b
--- /dev/null
+++ b/test/test_regex_group_rule.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.regex_group_rule import RegexGroupRule # noqa: E501
+
class TestRegexGroupRule(unittest.TestCase):
    """RegexGroupRule unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> RegexGroupRule:
        """Test RegexGroupRule
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `RegexGroupRule`
        """
        model = RegexGroupRule() # noqa: E501
        if include_optional:
            return RegexGroupRule(
                kind = 'regex_group',
                pattern = 'Order ID: (\d+), Customer: (.+)',
                group_replacements = {"1":"[REDACTED ORDER ID]"},
                flags = ["i"]
            )
        else:
            return RegexGroupRule(
                kind = 'regex_group',
                pattern = 'Order ID: (\d+), Customer: (.+)',
                group_replacements = {"1":"[REDACTED ORDER ID]"},
        )
        """

    def testRegexGroupRule(self):
        """Test RegexGroupRule"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_regex_rule.py b/test/test_regex_rule.py
new file mode 100644
index 0000000..108c709
--- /dev/null
+++ b/test/test_regex_rule.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.regex_rule import RegexRule # noqa: E501
+
class TestRegexRule(unittest.TestCase):
    """RegexRule unit test stubs"""
    # NOTE: generated stub; uncomment the body of make_instance to enable it.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional) -> RegexRule:
        """Test RegexRule
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # uncomment below to create an instance of `RegexRule`
        """
        model = RegexRule() # noqa: E501
        if include_optional:
            return RegexRule(
                kind = 'regex',
                pattern = '\b\d{4}\b',
                replacement = '[REDACTED YEAR]',
                flags = ["i","m"]
            )
        else:
            return RegexRule(
                kind = 'regex',
                pattern = '\b\d{4}\b',
                replacement = '[REDACTED YEAR]',
        )
        """

    def testRegexRule(self):
        """Test RegexRule"""
        # inst_req_only = self.make_instance(include_optional=False)
        # inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_remote_transcription_configuration.py b/test/test_remote_transcription_configuration.py
new file mode 100644
index 0000000..79a1b9a
--- /dev/null
+++ b/test/test_remote_transcription_configuration.py
@@ -0,0 +1,69 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.remote_transcription_configuration import RemoteTranscriptionConfiguration # noqa: E501
+
+class TestRemoteTranscriptionConfiguration(unittest.TestCase):
+ """RemoteTranscriptionConfiguration unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> RemoteTranscriptionConfiguration:
+ """Test RemoteTranscriptionConfiguration
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `RemoteTranscriptionConfiguration`
+ """
+ model = RemoteTranscriptionConfiguration() # noqa: E501
+ if include_optional:
+ return RemoteTranscriptionConfiguration(
+ model = 'openai.whisper-1',
+ language = 'en',
+ output_format = 'text',
+ ruleset_id = '',
+ punctuation = True,
+ timestamp_granularity = 'segment',
+ diarization = True,
+ initial_prompt = '',
+ temperature = 0,
+ smart_format = True,
+ speakers_expected = 1,
+ custom_vocabulary = ["Speechall", "Actondon", "HIPAA"],
+ file_url = 'https://files.speechall.com/samples/sample.wav',
+ replacement_ruleset = [
+ {"kind":"regex","pattern":"confidential project (\\w+)","replacement":"confidential project [REDACTED]","flags":["i"]}
+ ]
+ )
+ else:
+ return RemoteTranscriptionConfiguration(
+ model = 'openai.whisper-1',
+ file_url = 'https://files.speechall.com/samples/sample.wav',
+ )
+ """
+
+ def testRemoteTranscriptionConfiguration(self):
+ """Test RemoteTranscriptionConfiguration"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_replacement_rule.py b/test/test_replacement_rule.py
new file mode 100644
index 0000000..8008c87
--- /dev/null
+++ b/test/test_replacement_rule.py
@@ -0,0 +1,63 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.replacement_rule import ReplacementRule # noqa: E501
+
+class TestReplacementRule(unittest.TestCase):
+ """ReplacementRule unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> ReplacementRule:
+ """Test ReplacementRule
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `ReplacementRule`
+ """
+ model = ReplacementRule() # noqa: E501
+ if include_optional:
+ return ReplacementRule(
+ kind = 'regex_group',
+ search = 'customer X',
+ replacement = '[REDACTED YEAR]',
+ case_sensitive = True,
+ pattern = r'Order ID: (\d+), Customer: (.+)',
+ flags = ["i"],
+ group_replacements = {"1": "[REDACTED ORDER ID]"}
+ )
+ else:
+ return ReplacementRule(
+ kind = 'regex_group',
+ search = 'customer X',
+ replacement = '[REDACTED YEAR]',
+ pattern = r'Order ID: (\d+), Customer: (.+)',
+ group_replacements = {"1": "[REDACTED ORDER ID]"},
+ )
+ """
+
+ def testReplacementRule(self):
+ """Test ReplacementRule"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_replacement_rules_api.py b/test/test_replacement_rules_api.py
new file mode 100644
index 0000000..d9a49a3
--- /dev/null
+++ b/test/test_replacement_rules_api.py
@@ -0,0 +1,38 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from speechall.api.replacement_rules_api import ReplacementRulesApi # noqa: E501
+
+
+class TestReplacementRulesApi(unittest.TestCase):
+ """ReplacementRulesApi unit test stubs"""
+
+ def setUp(self) -> None:
+ self.api = ReplacementRulesApi()
+
+ def tearDown(self) -> None:
+ self.api.api_client.close()
+
+ def test_create_replacement_ruleset(self) -> None:
+ """Test case for create_replacement_ruleset
+
+ Create a reusable set of text replacement rules. # noqa: E501
+ """
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_speech_to_text_api.py b/test/test_speech_to_text_api.py
new file mode 100644
index 0000000..8c036f2
--- /dev/null
+++ b/test/test_speech_to_text_api.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from speechall.api.speech_to_text_api import SpeechToTextApi # noqa: E501
+
+
+class TestSpeechToTextApi(unittest.TestCase):
+ """SpeechToTextApi unit test stubs"""
+
+ def setUp(self) -> None:
+ self.api = SpeechToTextApi()
+
+ def tearDown(self) -> None:
+ self.api.api_client.close()
+
+ def test_list_speech_to_text_models(self) -> None:
+ """Test case for list_speech_to_text_models
+
+ Retrieve a list of all available speech-to-text models. # noqa: E501
+ """
+ pass
+
+ def test_transcribe(self) -> None:
+ """Test case for transcribe
+
+ Upload an audio file directly and receive a transcription. # noqa: E501
+ """
+ pass
+
+ def test_transcribe_remote(self) -> None:
+ """Test case for transcribe_remote
+
+ Transcribe an audio file located at a remote URL. # noqa: E501
+ """
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_speech_to_text_model.py b/test/test_speech_to_text_model.py
new file mode 100644
index 0000000..e07852a
--- /dev/null
+++ b/test/test_speech_to_text_model.py
@@ -0,0 +1,86 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.speech_to_text_model import SpeechToTextModel # noqa: E501
+
+class TestSpeechToTextModel(unittest.TestCase):
+ """SpeechToTextModel unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> SpeechToTextModel:
+ """Test SpeechToTextModel
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `SpeechToTextModel`
+ """
+ model = SpeechToTextModel() # noqa: E501
+ if include_optional:
+ return SpeechToTextModel(
+ id = 'openai.whisper-1',
+ display_name = 'Deepgram Nova 2 - General',
+ provider = 'amazon',
+ description = "Deepgram's latest general-purpose transcription model, optimized for various audio types.",
+ cost_per_second_usd = 0.0043,
+ is_available = True,
+ supported_languages = ["en-US","en-GB","es-ES","fr-FR","auto"],
+ punctuation = True,
+ diarization = True,
+ streamable = True,
+ real_time_factor = 10.0,
+ max_duration_seconds = 14400,
+ max_file_size_bytes = 104857600,
+ version = '2.0',
+ release_date = 'Thu Oct 26 02:00:00 CEST 2023',
+ model_type = 'general',
+ accuracy_tier = 'enhanced',
+ supported_audio_encodings = ["LINEAR16","FLAC","MP3","Opus"],
+ supported_sample_rates = [8000,16000,44100],
+ speaker_labels = True,
+ word_timestamps = True,
+ confidence_scores = True,
+ language_detection = True,
+ custom_vocabulary_support = False,
+ profanity_filtering = True,
+ noise_reduction = True,
+ supports_srt = True,
+ supports_vtt = True,
+ voice_activity_detection = True
+ )
+ else:
+ return SpeechToTextModel(
+ id = 'openai.whisper-1',
+ display_name = 'Deepgram Nova 2 - General',
+ provider = 'amazon',
+ is_available = True,
+ supports_srt = True,
+ supports_vtt = True,
+ )
+ """
+
+ def testSpeechToTextModel(self):
+ """Test SpeechToTextModel"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcript_language_code.py b/test/test_transcript_language_code.py
new file mode 100644
index 0000000..d063e57
--- /dev/null
+++ b/test/test_transcript_language_code.py
@@ -0,0 +1,34 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcript_language_code import TranscriptLanguageCode # noqa: E501
+
+class TestTranscriptLanguageCode(unittest.TestCase):
+ """TranscriptLanguageCode unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testTranscriptLanguageCode(self):
+ """Test TranscriptLanguageCode"""
+ # inst = TranscriptLanguageCode()
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcript_output_format.py b/test/test_transcript_output_format.py
new file mode 100644
index 0000000..3bcf36e
--- /dev/null
+++ b/test/test_transcript_output_format.py
@@ -0,0 +1,34 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcript_output_format import TranscriptOutputFormat # noqa: E501
+
+class TestTranscriptOutputFormat(unittest.TestCase):
+ """TranscriptOutputFormat unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testTranscriptOutputFormat(self):
+ """Test TranscriptOutputFormat"""
+ # inst = TranscriptOutputFormat()
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_detailed.py b/test/test_transcription_detailed.py
new file mode 100644
index 0000000..b647454
--- /dev/null
+++ b/test/test_transcription_detailed.py
@@ -0,0 +1,74 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_detailed import TranscriptionDetailed # noqa: E501
+
+class TestTranscriptionDetailed(unittest.TestCase):
+ """TranscriptionDetailed unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> TranscriptionDetailed:
+ """Test TranscriptionDetailed
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `TranscriptionDetailed`
+ """
+ model = TranscriptionDetailed() # noqa: E501
+ if include_optional:
+ return TranscriptionDetailed(
+ id = 'txn_123abc456def',
+ text = 'Hello world. This is a test transcription with two speakers.',
+ language = 'en',
+ duration = 15.75,
+ segments = [
+ speechall.models.transcription_segment.TranscriptionSegment(
+ start = 0.5,
+ end = 4.25,
+ text = 'Hello world.',
+ speaker = 'Speaker 0',
+ confidence = 0.95, )
+ ],
+ words = [
+ speechall.models.transcription_word.TranscriptionWord(
+ start = 0.5,
+ end = 4.25,
+ word = 'Hello',
+ speaker = 'Speaker 0',
+ confidence = 0.95, )
+ ],
+ provider_metadata = { }
+ )
+ else:
+ return TranscriptionDetailed(
+ id = 'txn_123abc456def',
+ text = 'Hello world. This is a test transcription with two speakers.',
+ )
+ """
+
+ def testTranscriptionDetailed(self):
+ """Test TranscriptionDetailed"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_model_identifier.py b/test/test_transcription_model_identifier.py
new file mode 100644
index 0000000..3245339
--- /dev/null
+++ b/test/test_transcription_model_identifier.py
@@ -0,0 +1,34 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_model_identifier import TranscriptionModelIdentifier # noqa: E501
+
+class TestTranscriptionModelIdentifier(unittest.TestCase):
+ """TranscriptionModelIdentifier unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testTranscriptionModelIdentifier(self):
+ """Test TranscriptionModelIdentifier"""
+ # inst = TranscriptionModelIdentifier()
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_only_text.py b/test/test_transcription_only_text.py
new file mode 100644
index 0000000..591855b
--- /dev/null
+++ b/test/test_transcription_only_text.py
@@ -0,0 +1,55 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_only_text import TranscriptionOnlyText # noqa: E501
+
+class TestTranscriptionOnlyText(unittest.TestCase):
+ """TranscriptionOnlyText unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> TranscriptionOnlyText:
+ """Test TranscriptionOnlyText
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `TranscriptionOnlyText`
+ """
+ model = TranscriptionOnlyText() # noqa: E501
+ if include_optional:
+ return TranscriptionOnlyText(
+ id = 'txn_123abc456def',
+ text = 'Hello world, this is a test transcription.'
+ )
+ else:
+ return TranscriptionOnlyText(
+ id = 'txn_123abc456def',
+ text = 'Hello world, this is a test transcription.',
+ )
+ """
+
+ def testTranscriptionOnlyText(self):
+ """Test TranscriptionOnlyText"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_provider.py b/test/test_transcription_provider.py
new file mode 100644
index 0000000..bfeed93
--- /dev/null
+++ b/test/test_transcription_provider.py
@@ -0,0 +1,34 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_provider import TranscriptionProvider # noqa: E501
+
+class TestTranscriptionProvider(unittest.TestCase):
+ """TranscriptionProvider unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testTranscriptionProvider(self):
+ """Test TranscriptionProvider"""
+ # inst = TranscriptionProvider()
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_response.py b/test/test_transcription_response.py
new file mode 100644
index 0000000..cbfcd21
--- /dev/null
+++ b/test/test_transcription_response.py
@@ -0,0 +1,74 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_response import TranscriptionResponse # noqa: E501
+
+class TestTranscriptionResponse(unittest.TestCase):
+ """TranscriptionResponse unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> TranscriptionResponse:
+ """Test TranscriptionResponse
+ include_option is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `TranscriptionResponse`
+ """
+ model = TranscriptionResponse() # noqa: E501
+ if include_optional:
+ return TranscriptionResponse(
+ id = 'txn_123abc456def',
+ text = 'Hello world, this is a test transcription.',
+ language = 'en',
+ duration = 15.75,
+ segments = [
+ speechall.models.transcription_segment.TranscriptionSegment(
+ start = 0.5,
+ end = 4.25,
+ text = 'Hello world.',
+ speaker = 'Speaker 0',
+ confidence = 0.95, )
+ ],
+ words = [
+ speechall.models.transcription_word.TranscriptionWord(
+ start = 0.5,
+ end = 4.25,
+ word = 'Hello',
+ speaker = 'Speaker 0',
+ confidence = 0.95, )
+ ],
+ provider_metadata = { }
+ )
+ else:
+ return TranscriptionResponse(
+ id = 'txn_123abc456def',
+ text = 'Hello world, this is a test transcription.',
+ )
+ """
+
+ def testTranscriptionResponse(self):
+ """Test TranscriptionResponse"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_segment.py b/test/test_transcription_segment.py
new file mode 100644
index 0000000..dd1666e
--- /dev/null
+++ b/test/test_transcription_segment.py
@@ -0,0 +1,56 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_segment import TranscriptionSegment # noqa: E501
+
+class TestTranscriptionSegment(unittest.TestCase):
+ """TranscriptionSegment unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> TranscriptionSegment:
+ """Test TranscriptionSegment
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `TranscriptionSegment`
+ """
+ model = TranscriptionSegment() # noqa: E501
+ if include_optional:
+ return TranscriptionSegment(
+ start = 0.5,
+ end = 4.25,
+ text = 'Hello world.',
+ speaker = 'Speaker 0',
+ confidence = 0.95
+ )
+ else:
+ return TranscriptionSegment(
+ )
+ """
+
+ def testTranscriptionSegment(self):
+ """Test TranscriptionSegment"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_transcription_word.py b/test/test_transcription_word.py
new file mode 100644
index 0000000..0e9c4b1
--- /dev/null
+++ b/test/test_transcription_word.py
@@ -0,0 +1,59 @@
+# coding: utf-8
+
+"""
+ Speechall API
+
+ The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+
+ The version of the OpenAPI document: 0.1.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+import datetime
+
+from speechall.models.transcription_word import TranscriptionWord # noqa: E501
+
+class TestTranscriptionWord(unittest.TestCase):
+ """TranscriptionWord unit test stubs"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def make_instance(self, include_optional) -> TranscriptionWord:
+ """Test TranscriptionWord
+ include_optional is a boolean, when False only required
+ params are included, when True both required and
+ optional params are included """
+ # uncomment below to create an instance of `TranscriptionWord`
+ """
+ model = TranscriptionWord() # noqa: E501
+ if include_optional:
+ return TranscriptionWord(
+ start = 0.5,
+ end = 4.25,
+ word = 'Hello',
+ speaker = 'Speaker 0',
+ confidence = 0.95
+ )
+ else:
+ return TranscriptionWord(
+ start = 0.5,
+ end = 4.25,
+ word = 'Hello',
+ )
+ """
+
+ def testTranscriptionWord(self):
+ """Test TranscriptionWord"""
+ # inst_req_only = self.make_instance(include_optional=False)
+ # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..069aa4b
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,9 @@
+[tox]
+envlist = py3
+
+[testenv]
+deps=-r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+
+commands=
+ pytest --cov=speechall
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..3d77660
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,657 @@
+version = 1
+revision = 2
+requires-python = ">=3.8"
+resolution-markers = [
+ "python_full_version >= '3.9'",
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+
+[[package]]
+name = "aenum"
+version = "3.1.16"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e3/52/6ad8f63ec8da1bf40f96996d25d5b650fdd38f5975f8c813732c47388f18/aenum-3.1.16-py3-none-any.whl", hash = "sha256:9035092855a98e41b66e3d0998bd7b96280e85ceb3a04cc035636138a1943eaf", size = 165627, upload-time = "2025-04-25T03:17:58.89Z" },
+]
+
+[[package]]
+name = "cachetools"
+version = "5.5.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6c/81/3747dad6b14fa2cf53fcf10548cf5aea6913e96fab41a3c198676f8948a5/cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4", size = 28380, upload-time = "2025-02-20T21:01:19.524Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/72/76/20fa66124dbe6be5cafeb312ece67de6b61dd91a0247d1ea13db4ebb33c2/cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a", size = 10080, upload-time = "2025-02-20T21:01:16.647Z" },
+]
+
+[[package]]
+name = "cachetools"
+version = "6.0.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c0/b0/f539a1ddff36644c28a61490056e5bae43bd7386d9f9c69beae2d7e7d6d1/cachetools-6.0.0.tar.gz", hash = "sha256:f225782b84438f828328fc2ad74346522f27e5b1440f4e9fd18b20ebfd1aa2cf", size = 30160, upload-time = "2025-05-23T20:01:13.076Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/c3/8bb087c903c95a570015ce84e0c23ae1d79f528c349cbc141b5c4e250293/cachetools-6.0.0-py3-none-any.whl", hash = "sha256:82e73ba88f7b30228b5507dce1a1f878498fc669d972aef2dde4f3a3c24f103e", size = 10964, upload-time = "2025-05-23T20:01:11.323Z" },
+]
+
+[[package]]
+name = "chardet"
+version = "5.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
+]
+
+[[package]]
+name = "distlib"
+version = "0.3.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923, upload-time = "2024-10-09T18:35:47.551Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973, upload-time = "2024-10-09T18:35:44.272Z" },
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", version = "4.13.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "typing-extensions", version = "4.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9' and python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" },
+]
+
+[[package]]
+name = "filelock"
+version = "3.16.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037, upload-time = "2024-09-17T19:02:01.779Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163, upload-time = "2024-09-17T19:02:00.268Z" },
+]
+
+[[package]]
+name = "filelock"
+version = "3.18.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" },
+]
+
+[[package]]
+name = "flake8"
+version = "5.0.4"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.8.1'",
+]
+dependencies = [
+ { name = "mccabe", marker = "python_full_version < '3.8.1'" },
+ { name = "pycodestyle", version = "2.9.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.8.1'" },
+ { name = "pyflakes", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.8.1'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ad/00/9808c62b2d529cefc69ce4e4a1ea42c0f855effa55817b7327ec5b75e60a/flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db", size = 145862, upload-time = "2022-08-03T23:21:27.108Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cf/a0/b881b63a17a59d9d07f5c0cc91a29182c8e8a9aa2bde5b3b2b16519c02f4/flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248", size = 61897, upload-time = "2022-08-03T23:21:25.027Z" },
+]
+
+[[package]]
+name = "flake8"
+version = "7.1.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+]
+dependencies = [
+ { name = "mccabe", marker = "python_full_version >= '3.8.1' and python_full_version < '3.9'" },
+ { name = "pycodestyle", version = "2.12.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.8.1' and python_full_version < '3.9'" },
+ { name = "pyflakes", version = "3.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.8.1' and python_full_version < '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/58/16/3f2a0bb700ad65ac9663262905a025917c020a3f92f014d2ba8964b4602c/flake8-7.1.2.tar.gz", hash = "sha256:c586ffd0b41540951ae41af572e6790dbd49fc12b3aa2541685d253d9bd504bd", size = 48119, upload-time = "2025-02-16T18:45:44.296Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/35/f8/08d37b2cd89da306e3520bd27f8a85692122b42b56c0c2c3784ff09c022f/flake8-7.1.2-py2.py3-none-any.whl", hash = "sha256:1cbc62e65536f65e6d754dfe6f1bada7f5cf392d6f5db3c2b85892466c3e7c1a", size = 57745, upload-time = "2025-02-16T18:45:42.351Z" },
+]
+
+[[package]]
+name = "flake8"
+version = "7.2.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+dependencies = [
+ { name = "mccabe", marker = "python_full_version >= '3.9'" },
+ { name = "pycodestyle", version = "2.13.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "pyflakes", version = "3.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e7/c4/5842fc9fc94584c455543540af62fd9900faade32511fab650e9891ec225/flake8-7.2.0.tar.gz", hash = "sha256:fa558ae3f6f7dbf2b4f22663e5343b6b6023620461f8d4ff2019ef4b5ee70426", size = 48177, upload-time = "2025-03-29T20:08:39.329Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/83/5c/0627be4c9976d56b1217cb5187b7504e7fd7d3503f8bfd312a04077bd4f7/flake8-7.2.0-py2.py3-none-any.whl", hash = "sha256:93b92ba5bdb60754a6da14fa3b93a9361fd00a59632ada61fd7b130436c40343", size = 57786, upload-time = "2025-03-29T20:08:37.902Z" },
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
+]
+
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" },
+]
+
+[[package]]
+name = "packaging"
+version = "25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.3.6"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302, upload-time = "2024-09-17T19:06:50.688Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439, upload-time = "2024-09-17T19:06:49.212Z" },
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.3.8"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" },
+]
+
+[[package]]
+name = "pluggy"
+version = "1.5.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" },
+]
+
+[[package]]
+name = "pluggy"
+version = "1.6.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
+]
+
+[[package]]
+name = "pycodestyle"
+version = "2.9.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b6/83/5bcaedba1f47200f0665ceb07bcb00e2be123192742ee0edfb66b600e5fd/pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785", size = 102127, upload-time = "2022-08-03T23:13:29.715Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/67/e4/fc77f1039c34b3612c4867b69cbb2b8a4e569720b1f19b0637002ee03aff/pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b", size = 41493, upload-time = "2022-08-03T23:13:27.416Z" },
+]
+
+[[package]]
+name = "pycodestyle"
+version = "2.12.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/43/aa/210b2c9aedd8c1cbeea31a50e42050ad56187754b34eb214c46709445801/pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521", size = 39232, upload-time = "2024-08-04T20:26:54.576Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3a/d8/a211b3f85e99a0daa2ddec96c949cac6824bd305b040571b82a03dd62636/pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3", size = 31284, upload-time = "2024-08-04T20:26:53.173Z" },
+]
+
+[[package]]
+name = "pycodestyle"
+version = "2.13.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/04/6e/1f4a62078e4d95d82367f24e685aef3a672abfd27d1a868068fed4ed2254/pycodestyle-2.13.0.tar.gz", hash = "sha256:c8415bf09abe81d9c7f872502a6eee881fbe85d8763dd5b9924bb0a01d67efae", size = 39312, upload-time = "2025-03-29T17:33:30.669Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/be/b00116df1bfb3e0bb5b45e29d604799f7b91dd861637e4d448b4e09e6a3e/pycodestyle-2.13.0-py2.py3-none-any.whl", hash = "sha256:35863c5974a271c7a726ed228a14a4f6daf49df369d8c50cd9a6f58a5e143ba9", size = 31424, upload-time = "2025-03-29T17:33:29.405Z" },
+]
+
+[[package]]
+name = "pydantic"
+version = "1.10.22"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", version = "4.13.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "typing-extensions", version = "4.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/9a/57/5996c63f0deec09e9e901a2b838247c97c6844999562eac4e435bcb83938/pydantic-1.10.22.tar.gz", hash = "sha256:ee1006cebd43a8e7158fb7190bb8f4e2da9649719bff65d0c287282ec38dec6d", size = 356771, upload-time = "2025-04-24T13:38:43.605Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/88/92/91eb5c75a1460292e1f2f3e577122574ebb942fbac19ad2369ff00b9eb24/pydantic-1.10.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:57889565ccc1e5b7b73343329bbe6198ebc472e3ee874af2fa1865cfe7048228", size = 2852481, upload-time = "2025-04-24T13:36:55.045Z" },
+ { url = "https://files.pythonhosted.org/packages/08/f3/dd54b49fc5caaed06f5a0d0a5ec35a81cf722cd6b42455f408dad1ef3f7d/pydantic-1.10.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90729e22426de79bc6a3526b4c45ec4400caf0d4f10d7181ba7f12c01bb3897d", size = 2585586, upload-time = "2025-04-24T13:36:58.453Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/9b/48d10180cc614ffb66da486e99bc1f8b639fb44edf322864f2fb161e2351/pydantic-1.10.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8684d347f351554ec94fdcb507983d3116dc4577fb8799fed63c65869a2d10", size = 3336974, upload-time = "2025-04-24T13:37:00.652Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/80/b55ad0029ae8e7b8b5c81ad7c4e800774a52107d26f70c6696857dc733d5/pydantic-1.10.22-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8dad498ceff2d9ef1d2e2bc6608f5b59b8e1ba2031759b22dfb8c16608e1802", size = 3362338, upload-time = "2025-04-24T13:37:02.42Z" },
+ { url = "https://files.pythonhosted.org/packages/65/e0/8a5cd2cd29a5632581ba466f5792194b2a568aa052ce9da9ba98b634debf/pydantic-1.10.22-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fac529cc654d4575cf8de191cce354b12ba705f528a0a5c654de6d01f76cd818", size = 3519505, upload-time = "2025-04-24T13:37:04.322Z" },
+ { url = "https://files.pythonhosted.org/packages/38/c5/c776d03ec374f22860802b2cee057b41e866be3c80826b53d4c001692db3/pydantic-1.10.22-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4148232aded8dd1dd13cf910a01b32a763c34bd79a0ab4d1ee66164fcb0b7b9d", size = 3485878, upload-time = "2025-04-24T13:37:06.102Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/a2/1efd064513a2c1bcb5c2b0e022cdf77d132ef7f7f20d91bb439d759f6a88/pydantic-1.10.22-cp310-cp310-win_amd64.whl", hash = "sha256:ece68105d9e436db45d8650dc375c760cc85a6793ae019c08769052902dca7db", size = 2299673, upload-time = "2025-04-24T13:37:07.969Z" },
+ { url = "https://files.pythonhosted.org/packages/42/03/e435ed85a9abda29e3fbdb49c572fe4131a68c6daf3855a01eebda9e1b27/pydantic-1.10.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e530a8da353f791ad89e701c35787418605d35085f4bdda51b416946070e938", size = 2845682, upload-time = "2025-04-24T13:37:10.142Z" },
+ { url = "https://files.pythonhosted.org/packages/72/ea/4a625035672f6c06d3f1c7e33aa0af6bf1929991e27017e98b9c2064ae0b/pydantic-1.10.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:654322b85642e9439d7de4c83cb4084ddd513df7ff8706005dada43b34544946", size = 2553286, upload-time = "2025-04-24T13:37:11.946Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/f0/424ad837746e69e9f061ba9be68c2a97aef7376d1911692904d8efbcd322/pydantic-1.10.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8bece75bd1b9fc1c32b57a32831517943b1159ba18b4ba32c0d431d76a120ae", size = 3141232, upload-time = "2025-04-24T13:37:14.394Z" },
+ { url = "https://files.pythonhosted.org/packages/14/67/4979c19e8cfd092085a292485e0b42d74e4eeefbb8cd726aa8ba38d06294/pydantic-1.10.22-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eccb58767f13c6963dcf96d02cb8723ebb98b16692030803ac075d2439c07b0f", size = 3214272, upload-time = "2025-04-24T13:37:16.201Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/04/32339ce43e97519d19e7759902515c750edbf4832a13063a4ab157f83f42/pydantic-1.10.22-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7778e6200ff8ed5f7052c1516617423d22517ad36cc7a3aedd51428168e3e5e8", size = 3321646, upload-time = "2025-04-24T13:37:19.086Z" },
+ { url = "https://files.pythonhosted.org/packages/92/35/dffc1b29cb7198aadab68d75447191e59bdbc1f1d2d51826c9a4460d372f/pydantic-1.10.22-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bffe02767d27c39af9ca7dc7cd479c00dda6346bb62ffc89e306f665108317a2", size = 3244258, upload-time = "2025-04-24T13:37:20.929Z" },
+ { url = "https://files.pythonhosted.org/packages/11/c5/c4ce6ebe7f528a879441eabd2c6dd9e2e4c54f320a8c9344ba93b3aa8701/pydantic-1.10.22-cp311-cp311-win_amd64.whl", hash = "sha256:23bc19c55427091b8e589bc08f635ab90005f2dc99518f1233386f46462c550a", size = 2309702, upload-time = "2025-04-24T13:37:23.296Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/a3/ec66239ed7c9e90edfb85b23b6b18eb290ed7aa05f54837cdcb6a14faa98/pydantic-1.10.22-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:92d0f97828a075a71d9efc65cf75db5f149b4d79a38c89648a63d2932894d8c9", size = 2794865, upload-time = "2025-04-24T13:37:25.087Z" },
+ { url = "https://files.pythonhosted.org/packages/49/6a/99cf3fee612d93210c85f45a161e98c1c5b45b6dcadb21c9f1f838fa9e28/pydantic-1.10.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6af5a2811b6b95b58b829aeac5996d465a5f0c7ed84bd871d603cf8646edf6ff", size = 2534212, upload-time = "2025-04-24T13:37:26.848Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/e6/0f8882775cd9a60b221103ee7d6a89e10eb5a892d877c398df0da7140704/pydantic-1.10.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cf06d8d40993e79af0ab2102ef5da77b9ddba51248e4cb27f9f3f591fbb096e", size = 2994027, upload-time = "2025-04-24T13:37:28.683Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/a3/f20fdecbaa2a2721a6a8ee9e4f344d1f72bd7d56e679371c3f2be15eb8c8/pydantic-1.10.22-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:184b7865b171a6057ad97f4a17fbac81cec29bd103e996e7add3d16b0d95f609", size = 3036716, upload-time = "2025-04-24T13:37:30.547Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/83/dab34436d830c38706685acc77219fc2a209fea2a2301a1b05a2865b28bf/pydantic-1.10.22-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:923ad861677ab09d89be35d36111156063a7ebb44322cdb7b49266e1adaba4bb", size = 3171801, upload-time = "2025-04-24T13:37:32.474Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/6e/b64deccb8a7304d584088972437ea3091e9d99d27a8e7bf2bd08e29ae84e/pydantic-1.10.22-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:82d9a3da1686443fb854c8d2ab9a473251f8f4cdd11b125522efb4d7c646e7bc", size = 3123560, upload-time = "2025-04-24T13:37:34.855Z" },
+ { url = "https://files.pythonhosted.org/packages/08/9a/90d1ab704329a7ae8666354be84b5327d655764003974364767c9d307d3a/pydantic-1.10.22-cp312-cp312-win_amd64.whl", hash = "sha256:1612604929af4c602694a7f3338b18039d402eb5ddfbf0db44f1ebfaf07f93e7", size = 2191378, upload-time = "2025-04-24T13:37:36.649Z" },
+ { url = "https://files.pythonhosted.org/packages/47/8f/67befe3607b342dd6eb80237134ebcc6e8db42138609306eaf2b30e1f273/pydantic-1.10.22-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b259dc89c9abcd24bf42f31951fb46c62e904ccf4316393f317abeeecda39978", size = 2797042, upload-time = "2025-04-24T13:37:38.753Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/91/bfde7d301f8e1c4cff949b3f1eb2c9b27bdd4b2368da0fe88e7350bbe4bc/pydantic-1.10.22-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9238aa0964d80c0908d2f385e981add58faead4412ca80ef0fa352094c24e46d", size = 2538572, upload-time = "2025-04-24T13:37:41.653Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/ce/1b0097ece420354df77d2f01c72278fb43770c8ed732d6b7a303c0c70875/pydantic-1.10.22-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f8029f05b04080e3f1a550575a1bca747c0ea4be48e2d551473d47fd768fc1b", size = 2986271, upload-time = "2025-04-24T13:37:43.551Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/4c/e257edfd5a0025a428aee7a2835e21b51c76a6b1c8994bcccb14d5721eea/pydantic-1.10.22-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5c06918894f119e0431a36c9393bc7cceeb34d1feeb66670ef9b9ca48c073937", size = 3015617, upload-time = "2025-04-24T13:37:45.466Z" },
+ { url = "https://files.pythonhosted.org/packages/00/17/ecf46ff31fd62d382424a07ed60540d4479094204bebeebb6dea597e88c3/pydantic-1.10.22-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e205311649622ee8fc1ec9089bd2076823797f5cd2c1e3182dc0e12aab835b35", size = 3164222, upload-time = "2025-04-24T13:37:47.35Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/47/2d55ec452c9a87347234bbbc70df268e1f081154b1851f0db89638558a1c/pydantic-1.10.22-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:815f0a73d5688d6dd0796a7edb9eca7071bfef961a7b33f91e618822ae7345b7", size = 3117572, upload-time = "2025-04-24T13:37:49.339Z" },
+ { url = "https://files.pythonhosted.org/packages/03/2f/30359a36245b029bec7e442dd780fc242c66e66ad7dd5b50af2dcfd41ff3/pydantic-1.10.22-cp313-cp313-win_amd64.whl", hash = "sha256:9dfce71d42a5cde10e78a469e3d986f656afc245ab1b97c7106036f088dd91f8", size = 2174666, upload-time = "2025-04-24T13:37:51.114Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/f7/5e48d4edbc95371b67eaa470d70fe6488619b029f90159c3acc564e37f0b/pydantic-1.10.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e3f33d1358aa4bc2795208cc29ff3118aeaad0ea36f0946788cf7cadeccc166b", size = 2560769, upload-time = "2025-04-24T13:38:06.184Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/b5/16bab549dd302cfacbf213eb48fe47faf0b848f98ff49c87cf75d7fb7a2e/pydantic-1.10.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:813f079f9cd136cac621f3f9128a4406eb8abd2ad9fdf916a0731d91c6590017", size = 2361835, upload-time = "2025-04-24T13:38:08.907Z" },
+ { url = "https://files.pythonhosted.org/packages/12/3c/07a38ca076b619c7e5d84db1fbecabca8359720b531846225f093e6086bc/pydantic-1.10.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab618ab8dca6eac7f0755db25f6aba3c22c40e3463f85a1c08dc93092d917704", size = 3170283, upload-time = "2025-04-24T13:38:11.755Z" },
+ { url = "https://files.pythonhosted.org/packages/42/2f/d2afc906b797df62563947bcd2b821cf891b333747eed8c4980685501a01/pydantic-1.10.22-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d128e1aaa38db88caca920d5822c98fc06516a09a58b6d3d60fa5ea9099b32cc", size = 3215363, upload-time = "2025-04-24T13:38:14.75Z" },
+ { url = "https://files.pythonhosted.org/packages/22/5b/90014b62103d270e4873707e289427c4481d122475ff007f428f811ada2f/pydantic-1.10.22-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:cc97bbc25def7025e55fc9016080773167cda2aad7294e06a37dda04c7d69ece", size = 3379293, upload-time = "2025-04-24T13:38:17.083Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/b3/32338a42c895ae203de32919e4010e3c42c9fb20fd4e895bb27437d54258/pydantic-1.10.22-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dda5d7157d543b1fa565038cae6e952549d0f90071c839b3740fb77c820fab8", size = 3314657, upload-time = "2025-04-24T13:38:19.373Z" },
+ { url = "https://files.pythonhosted.org/packages/44/b5/fda4029395b4877ca5b7abee5417d2f315c78cab0a377378a7c3a9680497/pydantic-1.10.22-cp38-cp38-win_amd64.whl", hash = "sha256:a093fe44fe518cb445d23119511a71f756f8503139d02fcdd1173f7b76c95ffe", size = 2334648, upload-time = "2025-04-24T13:38:21.537Z" },
+ { url = "https://files.pythonhosted.org/packages/01/6f/9658e94018bc7c4e71863fb0f1ea8d30f8b3439e17df7aa710b2bb72dbca/pydantic-1.10.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ec54c89b2568b258bb30d7348ac4d82bec1b58b377fb56a00441e2ac66b24587", size = 2854460, upload-time = "2025-04-24T13:38:23.753Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/b3/5184ec7d3423a37c193ffaeced03921f4c34d226f3c6852653784f37d38b/pydantic-1.10.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8f1d1a1532e4f3bcab4e34e8d2197a7def4b67072acd26cfa60e92d75803a48", size = 2587418, upload-time = "2025-04-24T13:38:26.449Z" },
+ { url = "https://files.pythonhosted.org/packages/90/25/27d769c5dc7491df5faebfc49a26f83ca2e070a9a788c67fde4c4e51d68b/pydantic-1.10.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad83ca35508c27eae1005b6b61f369f78aae6d27ead2135ec156a2599910121", size = 3331289, upload-time = "2025-04-24T13:38:29.262Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/18/7abe334d3d4de02ef2bbcc079a5782c53b868572b8d74aef2927d4f5b125/pydantic-1.10.22-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53cdb44b78c420f570ff16b071ea8cd5a477635c6b0efc343c8a91e3029bbf1a", size = 3361613, upload-time = "2025-04-24T13:38:31.575Z" },
+ { url = "https://files.pythonhosted.org/packages/68/95/6e649d14718969582ed35d1d70cb24a1ee825c65bec51e3275849d5aab8a/pydantic-1.10.22-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:16d0a5ae9d98264186ce31acdd7686ec05fd331fab9d68ed777d5cb2d1514e5e", size = 3520268, upload-time = "2025-04-24T13:38:33.835Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/87/eb3408e1c040a6d9f703e089d26a723d6c41f23a192e86bd7584d037d576/pydantic-1.10.22-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8aee040e25843f036192b1a1af62117504a209a043aa8db12e190bb86ad7e611", size = 3483434, upload-time = "2025-04-24T13:38:36.078Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/13/ec2e52439137768d1bb0d4955b890f788c23f4aab2cfe9eef9e2b55584de/pydantic-1.10.22-cp39-cp39-win_amd64.whl", hash = "sha256:7f691eec68dbbfca497d3c11b92a3e5987393174cbedf03ec7a4184c35c2def6", size = 2301586, upload-time = "2025-04-24T13:38:39.351Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/e0/1ed151a56869be1588ad2d8cda9f8c1d95b16f74f09a7cea879ca9b63a8b/pydantic-1.10.22-py3-none-any.whl", hash = "sha256:343037d608bcbd34df937ac259708bfc83664dadf88afe8516c4f282d7d471a9", size = 166503, upload-time = "2025-04-24T13:38:41.374Z" },
+]
+
+[[package]]
+name = "pyflakes"
+version = "2.5.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/07/92/f0cb5381f752e89a598dd2850941e7f570ac3cb8ea4a344854de486db152/pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3", size = 66388, upload-time = "2022-07-30T17:29:05.816Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/dc/13/63178f59f74e53acc2165aee4b002619a3cfa7eeaeac989a9eb41edf364e/pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2", size = 66116, upload-time = "2022-07-30T17:29:04.179Z" },
+]
+
+[[package]]
+name = "pyflakes"
+version = "3.2.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/57/f9/669d8c9c86613c9d568757c7f5824bd3197d7b1c6c27553bc5618a27cce2/pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", size = 63788, upload-time = "2024-01-05T00:28:47.703Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d4/d7/f1b7db88d8e4417c5d47adad627a93547f44bdc9028372dbd2313f34a855/pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a", size = 62725, upload-time = "2024-01-05T00:28:45.903Z" },
+]
+
+[[package]]
+name = "pyflakes"
+version = "3.3.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/af/cc/1df338bd7ed1fa7c317081dcf29bf2f01266603b301e6858856d346a12b3/pyflakes-3.3.2.tar.gz", hash = "sha256:6dfd61d87b97fba5dcfaaf781171ac16be16453be6d816147989e7f6e6a9576b", size = 64175, upload-time = "2025-03-31T13:21:20.34Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/15/40/b293a4fa769f3b02ab9e387c707c4cbdc34f073f945de0386107d4e669e6/pyflakes-3.3.2-py2.py3-none-any.whl", hash = "sha256:5039c8339cbb1944045f4ee5466908906180f13cc99cc9949348d10f82a5c32a", size = 63164, upload-time = "2025-03-31T13:21:18.503Z" },
+]
+
+[[package]]
+name = "pygments"
+version = "2.19.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" },
+]
+
+[[package]]
+name = "pyproject-api"
+version = "1.8.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+dependencies = [
+ { name = "packaging", marker = "python_full_version < '3.9'" },
+ { name = "tomli", marker = "python_full_version < '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bb/19/441e0624a8afedd15bbcce96df1b80479dd0ff0d965f5ce8fde4f2f6ffad/pyproject_api-1.8.0.tar.gz", hash = "sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496", size = 22340, upload-time = "2024-09-18T23:18:37.805Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ba/f4/3c4ddfcc0c19c217c6de513842d286de8021af2f2ab79bbb86c00342d778/pyproject_api-1.8.0-py3-none-any.whl", hash = "sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228", size = 13100, upload-time = "2024-09-18T23:18:35.927Z" },
+]
+
+[[package]]
+name = "pyproject-api"
+version = "1.9.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+dependencies = [
+ { name = "packaging", marker = "python_full_version >= '3.9'" },
+ { name = "tomli", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/19/fd/437901c891f58a7b9096511750247535e891d2d5a5a6eefbc9386a2b41d5/pyproject_api-1.9.1.tar.gz", hash = "sha256:43c9918f49daab37e302038fc1aed54a8c7a91a9fa935d00b9a485f37e0f5335", size = 22710, upload-time = "2025-05-12T14:41:58.025Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ef/e6/c293c06695d4a3ab0260ef124a74ebadba5f4c511ce3a4259e976902c00b/pyproject_api-1.9.1-py3-none-any.whl", hash = "sha256:7d6238d92f8962773dd75b5f0c4a6a27cce092a14b623b811dba656f3b628948", size = 13158, upload-time = "2025-05-12T14:41:56.217Z" },
+]
+
+[[package]]
+name = "pytest"
+version = "8.3.5"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+dependencies = [
+ { name = "colorama", marker = "python_full_version < '3.9' and sys_platform == 'win32'" },
+ { name = "exceptiongroup", marker = "python_full_version < '3.9'" },
+ { name = "iniconfig", marker = "python_full_version < '3.9'" },
+ { name = "packaging", marker = "python_full_version < '3.9'" },
+ { name = "pluggy", version = "1.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "tomli", marker = "python_full_version < '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" },
+]
+
+[[package]]
+name = "pytest"
+version = "8.4.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+dependencies = [
+ { name = "colorama", marker = "python_full_version >= '3.9' and sys_platform == 'win32'" },
+ { name = "exceptiongroup", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" },
+ { name = "iniconfig", marker = "python_full_version >= '3.9'" },
+ { name = "packaging", marker = "python_full_version >= '3.9'" },
+ { name = "pluggy", version = "1.6.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "pygments", marker = "python_full_version >= '3.9'" },
+ { name = "tomli", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fb/aa/405082ce2749be5398045152251ac69c0f3578c7077efc53431303af97ce/pytest-8.4.0.tar.gz", hash = "sha256:14d920b48472ea0dbf68e45b96cd1ffda4705f33307dcc86c676c1b5104838a6", size = 1515232, upload-time = "2025-06-02T17:36:30.03Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2f/de/afa024cbe022b1b318a3d224125aa24939e99b4ff6f22e0ba639a2eaee47/pytest-8.4.0-py3-none-any.whl", hash = "sha256:f40f825768ad76c0977cbacdf1fd37c6f7a468e460ea6a0636078f8972d4517e", size = 363797, upload-time = "2025-06-02T17:36:27.859Z" },
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
+]
+
+[[package]]
+name = "six"
+version = "1.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
+]
+
+[[package]]
+name = "speechall"
+version = "0.1.0"
+source = { editable = "." }
+dependencies = [
+ { name = "aenum" },
+ { name = "pydantic" },
+ { name = "python-dateutil" },
+ { name = "urllib3", version = "2.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "urllib3", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+]
+
+[package.optional-dependencies]
+dev = [
+ { name = "flake8", version = "5.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.8.1'" },
+ { name = "flake8", version = "7.1.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.8.1' and python_full_version < '3.9'" },
+ { name = "flake8", version = "7.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "pytest", version = "8.3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "pytest", version = "8.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "tox", version = "4.25.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "tox", version = "4.26.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "aenum", specifier = ">=3.1.11" },
+ { name = "flake8", marker = "extra == 'dev'", specifier = ">=4.0.0" },
+ { name = "pydantic", specifier = ">=1.10.5,<2" },
+ { name = "pytest", marker = "extra == 'dev'", specifier = ">=7.2.1" },
+ { name = "python-dateutil", specifier = ">=2.8.2" },
+ { name = "tox", marker = "extra == 'dev'", specifier = ">=3.9.0" },
+ { name = "urllib3", specifier = ">=1.25.3" },
+]
+provides-extras = ["dev"]
+
+[[package]]
+name = "tomli"
+version = "2.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" },
+ { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" },
+ { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" },
+ { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" },
+ { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" },
+ { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" },
+ { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" },
+ { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" },
+ { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" },
+ { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" },
+ { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" },
+ { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" },
+]
+
+[[package]]
+name = "tox"
+version = "4.25.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+dependencies = [
+ { name = "cachetools", version = "5.5.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "chardet", marker = "python_full_version < '3.9'" },
+ { name = "colorama", marker = "python_full_version < '3.9'" },
+ { name = "filelock", version = "3.16.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "packaging", marker = "python_full_version < '3.9'" },
+ { name = "platformdirs", version = "4.3.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "pluggy", version = "1.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "pyproject-api", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "tomli", marker = "python_full_version < '3.9'" },
+ { name = "typing-extensions", version = "4.13.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "virtualenv", marker = "python_full_version < '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fe/87/692478f0a194f1cad64803692642bd88c12c5b64eee16bf178e4a32e979c/tox-4.25.0.tar.gz", hash = "sha256:dd67f030317b80722cf52b246ff42aafd3ed27ddf331c415612d084304cf5e52", size = 196255, upload-time = "2025-03-27T15:13:37.519Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f9/38/33348de6fc4b1afb3d76d8485c8aecbdabcfb3af8da53d40c792332e2b37/tox-4.25.0-py3-none-any.whl", hash = "sha256:4dfdc7ba2cc6fdc6688dde1b21e7b46ff6c41795fb54586c91a3533317b5255c", size = 172420, upload-time = "2025-03-27T15:13:35.703Z" },
+]
+
+[[package]]
+name = "tox"
+version = "4.26.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+dependencies = [
+ { name = "cachetools", version = "6.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "chardet", marker = "python_full_version >= '3.9'" },
+ { name = "colorama", marker = "python_full_version >= '3.9'" },
+ { name = "filelock", version = "3.18.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "packaging", marker = "python_full_version >= '3.9'" },
+ { name = "platformdirs", version = "4.3.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "pluggy", version = "1.6.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "pyproject-api", version = "1.9.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "tomli", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" },
+ { name = "typing-extensions", version = "4.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9' and python_full_version < '3.11'" },
+ { name = "virtualenv", marker = "python_full_version >= '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fd/3c/dcec0c00321a107f7f697fd00754c5112572ea6dcacb40b16d8c3eea7c37/tox-4.26.0.tar.gz", hash = "sha256:a83b3b67b0159fa58e44e646505079e35a43317a62d2ae94725e0586266faeca", size = 197260, upload-time = "2025-05-13T15:04:28.481Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/de/14/f58b4087cf248b18c795b5c838c7a8d1428dfb07cb468dad3ec7f54041ab/tox-4.26.0-py3-none-any.whl", hash = "sha256:75f17aaf09face9b97bd41645028d9f722301e912be8b4c65a3f938024560224", size = 172761, upload-time = "2025-05-13T15:04:26.207Z" },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.13.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967, upload-time = "2025-04-10T14:19:05.416Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806, upload-time = "2025-04-10T14:19:03.967Z" },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.14.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.2.3"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.8.1' and python_full_version < '3.9'",
+ "python_full_version < '3.8.1'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677, upload-time = "2024-09-12T10:52:18.401Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338, upload-time = "2024-09-12T10:52:16.589Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.9'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" },
+]
+
+[[package]]
+name = "virtualenv"
+version = "20.31.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "distlib" },
+ { name = "filelock", version = "3.16.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "filelock", version = "3.18.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+ { name = "platformdirs", version = "4.3.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" },
+ { name = "platformdirs", version = "4.3.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/56/2c/444f465fb2c65f40c3a104fd0c495184c4f2336d65baf398e3c75d72ea94/virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af", size = 6076316, upload-time = "2025-05-08T17:58:23.811Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f3/40/b1c265d4b2b62b58576588510fc4d1fe60a86319c8de99fd8e9fec617d2c/virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11", size = 6057982, upload-time = "2025-05-08T17:58:21.15Z" },
+]