
AnyFast Python SDK

AnyFast Python SDK for AI services including chat completions, image generation, video generation, and asset management.

Installation

pip install anyfast

Configuration

from anyfast import AnyFast

client = AnyFast(
    api_key="your-api-key",
    base_url="https://www.anyfast.ai",  # optional, this is the default
)

Or use the ANYFAST_API_KEY environment variable:

client = AnyFast()

Options

| Parameter     | Default                   | Description                |
| ------------- | ------------------------- | -------------------------- |
| `api_key`     | `ANYFAST_API_KEY` env var | API key for authentication |
| `base_url`    | `https://www.anyfast.ai`  | API base URL               |
| `gateway_url` | `https://www.anyfast.ai`  | Gateway URL                |
| `timeout`     | `30.0`                    | Request timeout in seconds |

Chat Completions

OpenAI-compatible (/v1/chat/completions)

Works with GPT, Claude, Doubao, DeepSeek, Qwen, Grok, Gemini (compat), MiniMax, Kimi, etc.

result = client.chat.completions(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(result["choices"][0]["message"]["content"])

Streaming

for chunk in client.chat.completions(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    content = chunk["choices"][0].get("delta", {}).get("content", "")
    print(content, end="", flush=True)

Anthropic Messages (/v1/messages)

result = client.chat.messages(
    model="claude-sonnet-4-20250514",
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=1024,
)
print(result["content"][0]["text"])

Doubao Responses (/v1/responses)

result = client.chat.responses(
    model="doubao-seed-2.0-pro",
    input="Hello!",
)
print(result["output"][0]["content"][0]["text"])

Supports multimodal input:

result = client.chat.responses(
    model="doubao-seed-1-6-vision-250815",
    input=[
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "What is in this image?"},
                {"type": "input_image", "image_url": "https://example.com/photo.png"},
            ],
        }
    ],
)

Gemini Native (/v1beta/models/{model}:generateContent)

result = client.chat.gemini(
    "gemini-2.0-flash",
    contents=[
        {"role": "user", "parts": [{"text": "Hello!"}]},
    ],
)
print(result["candidates"][0]["content"]["parts"][0]["text"])

Image Generation

Synchronous

result = client.image.generate(
    model="flux-2-pro",
    prompt="A beautiful sunset over the ocean",
    size="1024x1024",
)
print(result["data"][0]["url"])

Async with Polling

response = client.image.generate_async(
    model="doubao-seedream-5-0-260128",
    prompt="A beautiful sunset over the ocean",
)

import time

status = client.image.query_task(response.task_id)
while status.is_processing():
    time.sleep(2)
    status = client.image.query_task(response.task_id)

Blocking Run

status = client.image.run(
    {"model": "flux-2-pro", "prompt": "A beautiful sunset"},
    poll_interval=3.0,
    timeout=180.0,
)

if status.is_completed():
    print(status.result)

Video Generation

Seedance

status = client.video.run(
    {
        "model": "seedance",
        "content": [{"type": "text", "text": "A cat playing with a ball"}],
        "resolution": "720p",
        "ratio": "16:9",
        "duration": 5,
    },
    poll_interval=5.0,
    timeout=600.0,
)

Image-to-video:

status = client.video.run({
    "model": "seedance",
    "content": [
        {"type": "text", "text": "Make it come alive"},
        {"type": "image_url", "image_url": {"url": "https://example.com/frame.jpg"}, "role": "first_frame"},
    ],
})

Kling

# Text-to-video
status = client.video.run_kling_text2video(
    {
        "model_name": "kling-v2-master",
        "prompt": "A sunset over the ocean",
        "mode": "pro",
        "duration": "10",
        "aspect_ratio": "16:9",
    },
    poll_interval=5.0,
)

# Image-to-video
status = client.video.run_kling_image2video({
    "model_name": "kling-v2-master",
    "image": "https://example.com/frame.jpg",
    "prompt": "Make it move",
})

# Multi-image-to-video
status = client.video.run_kling_multi_image2video({
    "model_name": "kling-v1-6",
    "image_list": [
        {"image": "https://example.com/1.jpg"},
        {"image": "https://example.com/2.jpg"},
    ],
    "prompt": "Transition between scenes",
})

Manual polling for Kling:

response = client.video.kling_text2video(
    model_name="kling-v2-master",
    prompt="A cat",
)

status = client.video.kling_query_task("text2video", response.task_id)

Asset Management (ByteDance Volc)

# Create asset group
group = client.asset.create_group(Name="my-group")

# List groups
groups = client.asset.list_groups()

# Upload asset
asset = client.asset.create_asset(
    GroupId=group["Id"],
    Name="reference-image",
    AssetType="Image",
    URL="https://example.com/image.png",
)

# List assets
assets = client.asset.list_assets(Filter={"GroupIds": [group["Id"]]})

Error Handling

from anyfast import AnyFastError, AuthenticationError, BadRequestError

try:
    result = client.chat.completions(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
    )
except AuthenticationError as e:
    print(f"Auth failed: {e}")
except BadRequestError as e:
    print(f"Bad request: {e}")
except AnyFastError as e:
    print(f"Status: {e.status}, Code: {e.code}, Message: {e}")

License

Apache License 2.0