Official Python SDK for the Lyzr Agent Simulation Engine (A-Sim) Platform
Installation • Quick Start • Features • API Reference • Examples
The Agent Simulation Engine (A-Sim) SDK enables you to programmatically test, evaluate, and improve your AI agents through automated simulation and reinforcement learning loops.
A-Sim is a comprehensive platform for:
| Feature | Description |
|---|---|
| Automated Testing | Generate realistic test cases from persona × scenario combinations |
| AI Evaluation | Evaluate agent responses on accuracy, helpfulness, safety & more |
| RL Hardening | Continuously improve agents through reinforcement learning loops |
| Ground Truth Validation | Validate responses against knowledge base facts |
Install the SDK:

```bash
pip install git+https://github.com/LyzrCore/agent-simulation-engine.git#subdirectory=sdk
```

Get your `studio-api-key` from Lyzr Studio, then run the full workflow:

```python
from agent_simulation_engine import ASIMEngine
import time
# ============================================
# STEP 1: Initialize Engine
# ============================================
engine = ASIMEngine(api_key="your-studio-api-key")
# ============================================
# STEP 2: Create Environment
# ============================================
env = engine.environments.create(
agent_id="your-agent-id", # From Lyzr Studio
name="My Agent Tests"
)
print(f"β Environment created: {env.environment_id}")
# ============================================
# STEP 3: Generate Personas & Scenarios
# ============================================
personas = engine.personas.generate(env.environment_id)
print(f"β Generated {personas.count} personas")
scenarios = engine.scenarios.generate(env.environment_id)
print(f"β Generated {scenarios.count} scenarios")
# ============================================
# STEP 4: Generate Simulations (Test Cases)
# ============================================
job = engine.simulations.generate(env.environment_id)
print(f"β Simulation job started: {job.job_id}")
# Wait for completion
while True:
status = engine.jobs.get_status(env.environment_id, job.job_id)
print(f" Progress: {status.progress}")
if status.summary.completed + status.summary.failed == status.summary.total:
break
time.sleep(3)
print(f"β Simulations generated!")
# ============================================
# STEP 5: Run Evaluations
# ============================================
eval_run = engine.evaluations.create(
environment_id=env.environment_id,
evaluation_run_name="Round 1"
)
print(f"β Evaluation started: {eval_run.evaluation_run_id}")
# Wait for completion
while True:
status = engine.jobs.get_evaluation_status(env.environment_id, eval_run.job_id)
print(f" Progress: {status.progress}")
if status.summary.completed + status.summary.failed == status.summary.total:
break
time.sleep(3)
# ============================================
# STEP 6: View Results
# ============================================
results = engine.evaluations.list(env.environment_id)
pass_count = sum(1 for e in results.evaluations if e.judgment == "PASS")
fail_count = sum(1 for e in results.evaluations if e.judgment == "FAIL")
print(f"\nπ Results: {pass_count} PASS | {fail_count} FAIL")
# ============================================
# STEP 7: Harden Agent (if needed)
# ============================================
if fail_count > 0:
    hardening = engine.hardening.harden_agent(
        environment_id=env.environment_id,
        run_id=eval_run.evaluation_run_id,
        round_number=1
    )
    print(f"\n🔧 Agent Hardened!")
    print(f"   Original: {hardening.original_config.agent_instructions[:100]}...")
    print(f"   Improved: {hardening.improved_config.agent_instructions[:100]}...")
```

Run the script:

```bash
python your_script.py
```

To install from GitHub:

```bash
pip install git+https://github.com/LyzrCore/agent-simulation-engine.git#subdirectory=sdk
```

Or install from source:

```bash
git clone https://github.com/LyzrCore/agent-simulation-engine.git
cd agent-simulation-engine/sdk
pip install -e .
```

Requirements:

- Python 3.8+
- requests >= 2.28.0
- pydantic >= 2.0.0

Basic usage:

```python
from agent_simulation_engine import ASIMEngine
import time
# Initialize the engine
engine = ASIMEngine(api_key="studio-api-key")
# Create an environment for your agent
env = engine.environments.create(
agent_id="studio-agent-key",
name="Customer Support Tests"
)
# Generate personas and scenarios using AI
personas = engine.personas.generate(env.environment_id)
scenarios = engine.scenarios.generate(env.environment_id)
# Generate test simulations
job = engine.simulations.generate(env.environment_id)
# Poll until complete
while True:
    status = engine.jobs.get_status(env.environment_id, job.job_id)
    if status.summary.completed + status.summary.failed == status.summary.total:
        break
    time.sleep(2)
# Run evaluations
eval_run = engine.evaluations.create(
environment_id=env.environment_id,
evaluation_run_name="Round 1"
)
print(f"Evaluation started: {eval_run.evaluation_run_id}")# 1. Run initial evaluations
RL hardening workflow:

```python
# 1. Run initial evaluations
eval_run = engine.evaluations.create(env_id, "Round 1")
# 2. Wait for completion, then harden
hardening = engine.hardening.harden_agent(
environment_id=env_id,
run_id=eval_run.evaluation_run_id,
round_number=1
)
# 3. View improvements
print("Original:", hardening.original_config.agent_instructions)
print("Improved:", hardening.improved_config.agent_instructions)
# 4. Continue with improved agent
new_round = engine.hardening.continue_run(
environment_id=env_id,
run_id=eval_run.evaluation_run_id,
round_number=1,
agent_config=hardening.improved_config.model_dump()
)
# 5. Repeat until all tests pass!
```
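Putting steps 1 to 5 together, the loop below is one way to iterate hardening rounds until every test passes. It is a sketch, not SDK-prescribed usage: it reuses only the calls shown above, and it assumes `continue_run()` returns a run handle exposing `evaluation_run_id` and `job_id` for the next round, which this README does not confirm.

```python
import time

MAX_ROUNDS = 3  # illustrative cap on hardening rounds

run = engine.evaluations.create(env_id, "Round 1")

for round_number in range(1, MAX_ROUNDS + 1):
    # Wait for the current evaluation round to finish (same polling pattern as above)
    while True:
        status = engine.jobs.get_evaluation_status(env_id, run.job_id)
        if status.summary.completed + status.summary.failed == status.summary.total:
            break
        time.sleep(3)

    # Count failures for this environment
    results = engine.evaluations.list(env_id)
    failures = [e for e in results.evaluations if e.judgment == "FAIL"]
    if not failures:
        print(f"All tests passed after round {round_number}")
        break

    # Harden the agent based on the failed evaluations
    hardening = engine.hardening.harden_agent(
        environment_id=env_id,
        run_id=run.evaluation_run_id,
        round_number=round_number,
    )

    # Assumption: continue_run() starts the next round and returns its run handle
    run = engine.hardening.continue_run(
        environment_id=env_id,
        run_id=run.evaluation_run_id,
        round_number=round_number,
        agent_config=hardening.improved_config.model_dump(),
    )
```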
Configuration:

```python
from agent_simulation_engine import ASIMEngine

engine = ASIMEngine(
api_key="studio-api-key", # Required
base_url="https://agent.api.lyzr.ai", # Optional
timeout=30 # Optional (seconds)
)
```

API Reference:

| Resource | Description | Methods |
|---|---|---|
| `engine.environments` | Manage test environments | create(), get(), list_by_agent(), delete() |
| `engine.personas` | User archetypes | create(), list(), generate(), delete() |
| `engine.scenarios` | Task types | create(), list(), generate(), delete() |
| `engine.simulations` | Test cases | create(), list(), get(), update(), delete(), generate() |
| `engine.evaluations` | Run evaluations | create(), list(), get() |
| `engine.jobs` | Track async jobs | get_status(), list(), cancel(), get_evaluation_status() |
| `engine.evaluation_runs` | RL training rounds | get(), list(), get_round(), sync_round() |
| `engine.hardening` | Improve agents | harden_agent(), continue_run() |
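Most of these methods appear in the examples that follow. For the rest, the sketch below shows how they would plausibly be called; the parameter names for `list_by_agent()`, `jobs.list()`, `jobs.cancel()`, and `evaluation_runs.list()` are assumptions, not confirmed by this README, so check the SDK source for exact signatures.

```python
# Illustrative sketch only: parameter names below are assumptions.
envs = engine.environments.list_by_agent(agent_id="your-agent-id")  # environments for a Studio agent
jobs = engine.jobs.list(env.environment_id)                         # async jobs in an environment
runs = engine.evaluation_runs.list(env.environment_id)              # RL training rounds
# engine.jobs.cancel(env.environment_id, job.job_id)                # cancel a running job
```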
Example: create an environment with custom personas.

```python
# Create environment
env = engine.environments.create(
agent_id="studio-agent-key",
name="Product Support Tests"
)
# Add personas manually
personas = engine.personas.create(env.environment_id, personas=[
{"name": "New Customer", "description": "First-time user, unfamiliar with product"},
{"name": "Power User", "description": "Experienced user with technical knowledge"},
{"name": "Frustrated Customer", "description": "User experiencing issues, potentially angry"},
])
# Or generate with AI
personas = engine.personas.generate(env.environment_id)
scenarios = engine.scenarios.generate(env.environment_id)
```

Example: run evaluations and inspect the results.

```python
# Start evaluation run
eval_run = engine.evaluations.create(
environment_id=env.environment_id,
evaluation_run_name="Initial Assessment",
metrics=["task_completion", "hallucinations", "answer_relevancy"]
)
# Poll for completion
while True:
    status = engine.jobs.get_evaluation_status(env.environment_id, eval_run.job_id)
    print(f"Progress: {status.progress}")
    if status.summary.completed + status.summary.failed == status.summary.total:
        break
    time.sleep(2)
# Get results
results = engine.evaluations.list(env.environment_id)
for eval in results.evaluations:
    print(f"{eval.id}: {eval.judgment} - {eval.scores}")
```

The SDK provides specific exception types for different error scenarios:

```python
from agent_simulation_engine import (
ASIMEngine,
ASIMError,
AuthenticationError,
NotFoundError,
ValidationError,
RateLimitError,
ServerError,
)
engine = ASIMEngine(api_key="studio-api-key")
try:
    env = engine.environments.get("invalid-id")
except AuthenticationError:
    print("Invalid API key")
except NotFoundError:
    print("Environment not found")
except ValidationError as e:
    print(f"Invalid request: {e.message}")
except RateLimitError:
    print("Rate limit exceeded, please retry later")
except ServerError:
    print("Server error, please try again")
except ASIMError as e:
    print(f"API error: {e.message} (status: {e.status_code})")
```
Support:

| Resource | Link |
|---|---|
| GitHub Issues | Report a Bug |
Built with ❤️ by Lyzr AI


