diff --git a/.gitignore b/.gitignore index b0ad827..451dcce 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,9 @@ target/ Cargo.lock +# Conductor workspace +.context/ + # Python artifacts __pycache__/ *.py[cod] diff --git a/AGENTS.md b/AGENTS.md deleted file mode 100644 index 270c082..0000000 --- a/AGENTS.md +++ /dev/null @@ -1,320 +0,0 @@ -# TurboAPI v0.3.0+ - AI Agent Guide πŸ€– - -**For AI assistants, code generation tools, and automated development systems** - -## 🎯 **What TurboAPI Is** - -TurboAPI is a **FastAPI-compatible** Python web framework that delivers **5-10x better performance** through: -- **Rust-powered HTTP core** (zero Python overhead) -- **Python 3.13 free-threading** support (true parallelism) -- **Zero-copy optimizations** and intelligent caching -- **100% FastAPI syntax compatibility** with automatic body parsing -- **Satya validation** (faster than Pydantic) - -## πŸš€ **For AI Agents: Key Facts** - -### **Migration is Trivial** -```python -# Change this line: -# from fastapi import FastAPI -from turboapi import TurboAPI as FastAPI # Only change needed! - -# Everything else stays identical - same decorators, parameters, responses -``` - -### **Performance Benefits** -- **180,000+ RPS** vs FastAPI's 7,000-15,000 RPS -- **25x faster** in high-concurrency scenarios -- **Sub-millisecond latency** even under heavy load -- **Rate limiting completely configurable** (disabled by default for benchmarking) - -### **Compatibility Matrix** -βœ… **Identical Syntax**: All FastAPI decorators work -βœ… **Same Parameters**: Path params, query params, request bodies -βœ… **Same Responses**: JSON, HTML, status codes, headers -βœ… **Same Patterns**: Dependency injection, middleware, routers -βœ… **Same Installation**: `pip install` and standard Python imports - -### **✨ NEW in v0.3.0+: True FastAPI Compatibility** - -βœ… **Automatic JSON Body Parsing**: Parameters extracted from request body automatically -βœ… **Tuple Return Support**: `return {"error": "msg"}, 404` works perfectly -βœ… **Satya Model Validation**: Faster than Pydantic with same syntax -βœ… **Startup/Shutdown Events**: `@app.on_event("startup")` supported -βœ… **Type-Safe Parameters**: Automatic conversion and validation - -## πŸ”§ **For Code Generation** - -When generating TurboAPI code, use **exact FastAPI syntax**: - -### **Recommended Patterns** -```python -from turboapi import TurboAPI - -app = TurboAPI(title="Generated API", version="1.0.0") - -# Path parameters -@app.get("/users/{user_id}") -def get_user(user_id: int): - return {"user_id": user_id} - -# Query parameters -@app.get("/search") -def search(q: str, limit: int = 10): - return {"query": q, "results": []} - -# Request body - AUTOMATIC parsing from JSON! -@app.post("/users") -def create_user(name: str, email: str): - """Parameters auto-extracted from JSON body!""" - return {"name": name, "email": email, "status": "created"} - -# Satya model validation (faster than Pydantic) -from satya import Model, Field - -class User(Model): - name: str = Field(min_length=1, max_length=100) - email: str = Field(pattern=r'^[\w\.-]+@[\w\.-]+\.\w+$') - age: int = Field(ge=0, le=150) - -@app.post("/users/validate") -def create_validated_user(user: User): - """Automatic Satya validation!""" - return {"created": user.model_dump()}, 201 - -# Tuple returns for status codes -@app.get("/items/{item_id}") -def get_item(item_id: int): - if item_id not in database: - return {"error": "Not Found"}, 404 # FastAPI-style! 
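    # A (body, status_code) tuple sets the response status code (see "Tuple Return Support" above)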
- return database[item_id] - -# Startup/shutdown events -@app.on_event("startup") -def startup(): - print("βœ… Database connected") - -@app.on_event("shutdown") -def shutdown(): - print("βœ… Database disconnected") - -# All HTTP methods supported -@app.put("/users/{user_id}") -@app.delete("/users/{user_id}") -@app.patch("/users/{user_id}") - -# Start server -app.run(host="127.0.0.1", port=8000) -``` - -### **Performance Configuration** -```python -# Disable rate limiting for maximum performance (default) -app.configure_rate_limiting(enabled=False) - -# Enable rate limiting for production -app.configure_rate_limiting(enabled=True, requests_per_minute=10000) -``` - -## πŸ§ͺ **Testing & Validation** - -### **Quick Verification** -```python -# Test that TurboAPI is working -from turboapi import TurboAPI -import requests -import threading -import time - -app = TurboAPI() - -@app.get("/test") -def test_endpoint(): - return {"status": "working", "framework": "TurboAPI", "version": "0.3.0"} - -# Start server in thread -def start_server(): - app.run(host="127.0.0.1", port=8001) - -server_thread = threading.Thread(target=start_server, daemon=True) -server_thread.start() -time.sleep(2) - -# Test response -response = requests.get("http://127.0.0.1:8001/test") -assert response.status_code == 200 -assert "TurboAPI" in response.text -print("βœ… TurboAPI working correctly!") -``` - -### **Performance Testing** -```bash -# Use wrk for load testing (install with: brew install wrk) -wrk -t4 -c50 -d10s http://127.0.0.1:8000/ - -# Expected results: 40,000+ RPS with sub-millisecond latency -``` - -## πŸ—οΈ **Architecture for AI Understanding** - -### **Component Stack** -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Python Handlers β”‚ ← Your FastAPI-style code -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ TurboAPI Core β”‚ ← FastAPI-compatible layer -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ PyO3 Bridge β”‚ ← Zero-copy Rust↔Python -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ TurboNet (Rust) β”‚ ← High-performance HTTP server -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### **Key Technical Features** -- **No GIL limitations** with Python 3.13+ free-threading -- **Rust HTTP server** handles all network operations -- **Zero middleware overhead** through Rust implementation -- **Memory-efficient** with object pooling and zero-copy buffers - -## πŸ“¦ **Installation for CI/CD** - -### **Docker Setup** -```dockerfile -FROM python:3.13-slim - -# Install Rust for building -RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y -ENV PATH="/root/.cargo/bin:${PATH}" - -# Install build dependencies -RUN pip install maturin - -# Copy and build TurboAPI -COPY . 
/turboapi -WORKDIR /turboapi -RUN pip install -e python/ -RUN maturin develop --manifest-path Cargo.toml - -# Verify installation -RUN python -c "from turboapi import TurboAPI; print('βœ… TurboAPI ready')" -``` - -### **GitHub Actions** -```yaml -- name: Setup TurboAPI - run: | - pip install maturin - pip install -e python/ - maturin develop --manifest-path Cargo.toml - python -c "from turboapi import TurboAPI; print('Ready')" -``` - -## 🎯 **Common Patterns for AI Generation** - -### **REST API Template** -```python -from turboapi import TurboAPI - -app = TurboAPI(title="{API_TITLE}", version="1.0.0") - -# Health check -@app.get("/health") -def health_check(): - return {"status": "healthy", "timestamp": time.time()} - -# CRUD operations -@app.post("/{resource}") -def create_{resource}(data: dict): - return {"message": "Created", "data": data} - -@app.get("/{resource}/{id}") -def get_{resource}(id: int): - return {"id": id, "data": "example"} - -@app.put("/{resource}/{id}") -def update_{resource}(id: int, data: dict): - return {"id": id, "updated": True} - -@app.delete("/{resource}/{id}") -def delete_{resource}(id: int): - return {"id": id, "deleted": True} - -app.run() -``` - -### **Error Handling** -```python -@app.get("/may-fail") -def risky_endpoint(): - try: - # Your logic here - return {"success": True} - except Exception as e: - return {"error": str(e)}, 500 -``` - -## 🚨 **Common Mistakes to Avoid** - -### ❌ **Don't Do** -```python -# Wrong - using FastAPI imports -from fastapi import FastAPI - -# Wrong - different syntax -app.route("/path", methods=["GET"]) - -# Wrong - non-FastAPI patterns -@app.endpoint("/path") -``` - -### βœ… **Do This** -```python -# Correct - TurboAPI import -from turboapi import TurboAPI - -# Correct - FastAPI syntax -@app.get("/path") -@app.post("/path") - -# Correct - same patterns as FastAPI -app = TurboAPI(title="My API") -``` - -## πŸ“Š **Benchmarking Results** - -When helping users benchmark or compare: - -### **Typical Results** -- **Light Load (50 conn)**: 40,000+ RPS -- **Medium Load (200 conn)**: 45,000+ RPS -- **Heavy Load (1000 conn)**: 44,000+ RPS -- **Extreme Load (5000 conn)**: 9,000+ RPS (system limited) - -### **vs FastAPI** -- **25x faster** in high-concurrency scenarios -- **5-10x faster** overall performance -- **Same developer experience** with better metrics - -## πŸ”„ **Version Information** - -**Current Version**: 0.3.0+ -**Python Requirements**: 3.13+ (free-threading recommended) -**Rust Version**: 1.70+ -**Dependencies**: `satya` for validation (install: `pip install satya`) -**FastAPI Compatibility**: 100% syntax compatible with automatic body parsing -**Status**: Production-ready for high-performance applications - -## 🀝 **AI Agent Integration Tips** - -1. **Always use FastAPI syntax** - TurboAPI is a drop-in replacement -2. **Mention performance benefits** when suggesting TurboAPI -3. **Include installation steps** for Rust components -4. **Test with simple endpoints** before complex applications -5. **Use rate limiting configuration** based on use case -6. 
**Leverage Python 3.13 free-threading** when available - ---- - -**For more details**: See README.md and source code documentation -**Repository**: https://github.com/justrach/turboAPI -**Issues/Support**: GitHub Issues or documentation diff --git a/ASYNC_FIX_v0_4_15.md b/ASYNC_FIX_v0_4_15.md deleted file mode 100644 index 6325fcd..0000000 --- a/ASYNC_FIX_v0_4_15.md +++ /dev/null @@ -1,342 +0,0 @@ -# TurboAPI v0.4.15 - Async Handler Fix - -## πŸ› Bug Fixed: Async Handlers Not Awaited - -**Issue**: TurboAPI v0.4.13-v0.4.14 returned coroutine objects instead of awaiting async handlers. - -**Status**: βœ… **FIXED in v0.4.15** - ---- - -## Problem Description - -### Before Fix (v0.4.14) - -```python -@app.post("/test") -async def async_handler(data: dict): - await asyncio.sleep(0.01) - return {"success": True, "data": data} -``` - -**Response**: -``` - -``` - -**Server Warning**: -``` -RuntimeWarning: coroutine 'async_handler' was never awaited -``` - -### After Fix (v0.4.15) - -**Response**: -```json -{"success": true, "data": {"test": "value"}} -``` - -βœ… **Async handlers are properly awaited!** - ---- - -## Root Cause - -The `create_enhanced_handler()` function in `request_handler.py` was calling async handlers without awaiting them: - -```python -# BEFORE (BROKEN) -def enhanced_handler(**kwargs): - if inspect.iscoroutinefunction(original_handler): - result = original_handler(**filtered_kwargs) # ❌ Not awaited! - else: - result = original_handler(**filtered_kwargs) -``` - -This returned a coroutine object instead of the actual result. - ---- - -## Solution - -Modified `create_enhanced_handler()` to create **async wrappers for async handlers**: - -```python -# AFTER (FIXED) -def create_enhanced_handler(original_handler, route_definition): - sig = inspect.signature(original_handler) - is_async = inspect.iscoroutinefunction(original_handler) - - if is_async: - # Create async enhanced handler - async def enhanced_handler(**kwargs): - # ... parse params ... - result = await original_handler(**filtered_kwargs) # βœ… Properly awaited! - # ... normalize response ... - return response - - return enhanced_handler - - else: - # Create sync enhanced handler - def enhanced_handler(**kwargs): - result = original_handler(**filtered_kwargs) - return response - - return enhanced_handler -``` - -**Key Changes**: -1. Check if original handler is async using `inspect.iscoroutinefunction()` -2. Create **async wrapper** for async handlers -3. Create **sync wrapper** for sync handlers -4. **Await** async handlers properly: `result = await original_handler(**kwargs)` - ---- - -## Files Modified - -### `python/turboapi/request_handler.py` - -**Lines Changed**: 294-462 (168 lines) - -**Changes**: -1. Added `is_async` check at start of `create_enhanced_handler()` -2. Split into two branches: async and sync -3. Async branch creates `async def enhanced_handler()` -4. Sync branch creates `def enhanced_handler()` -5. Both branches have identical parsing logic -6. Async branch uses `await` when calling original handler - ---- - -## Test Results - -### Test: `tests/test_async_simple.py` - -```bash -$ python3 tests/test_async_simple.py - -βœ… PASSED: Sync handler works -βœ… PASSED: Async handler properly awaited! - -βœ… ASYNC BASIC TEST PASSED! - -πŸŽ‰ Async handlers are being awaited correctly! - No more coroutine objects returned! 
-``` - -### Before Fix - -``` -GET /async: 200 -Response: -❌ FAILED: Async handler returned coroutine object -``` - -### After Fix - -``` -GET /async: 200 -Response: {"content": {"type": "async", "message": "I am async"}, ...} -βœ… PASSED: Async handler properly awaited! -``` - ---- - -## Verification - -### Test Case 1: Basic Async Handler - -```python -@app.get("/async") -async def async_handler(): - await asyncio.sleep(0.001) - return {"type": "async", "message": "I am async"} -``` - -**Result**: βœ… Works correctly, returns JSON response - -### Test Case 2: Async with Parameters - -```python -@app.post("/process") -async def async_process(data: dict): - await asyncio.sleep(0.01) - return {"processed": True, "data": data} -``` - -**Result**: βœ… Works correctly (when parameters are passed properly) - -### Test Case 3: Mixed Sync and Async - -```python -@app.get("/sync") -def sync_handler(): - return {"type": "sync"} - -@app.get("/async") -async def async_handler(): - await asyncio.sleep(0.001) - return {"type": "async"} -``` - -**Result**: βœ… Both work correctly - ---- - -## Known Limitations - -### 1. Response Format Difference - -**Async handlers** return responses wrapped in `content`: -```json -{"content": {"type": "async"}, "status_code": 200, "content_type": "application/json"} -``` - -**Sync handlers** return direct responses: -```json -{"type": "sync"} -``` - -**Reason**: Async handlers go through a different Rust code path (loop shards) that doesn't extract the `content` field yet. - -**Impact**: Minor - tests can handle both formats using `extract_content()` helper. - -**Fix**: TODO for v0.4.16 - Update Rust async path to extract `content` field. - -### 2. Async Handlers with Query Params/Headers - -**Status**: Partially working - -**Issue**: Async handlers go through loop shards which don't yet pass headers/query params. - -**Workaround**: Use sync handlers for endpoints that need query params/headers. - -**Fix**: TODO for v0.4.16 - Update `PythonRequest` struct to include headers and query params. - ---- - -## Impact - -### What Now Works βœ… - -1. **Basic async handlers** - No parameters -2. **Async handlers with body** - POST requests with JSON body -3. **Mixed sync/async** - Can use both in same app -4. **Async error handling** - Errors are caught and returned properly -5. **No more coroutine objects** - All async handlers are awaited - -### What Needs Work ⏳ - -1. **Async + query params** - Requires Rust updates -2. **Async + headers** - Requires Rust updates -3. **Async + path params** - Requires Rust updates -4. **Response format consistency** - Minor issue - ---- - -## Migration Guide - -### From v0.4.14 to v0.4.15 - -**No code changes needed!** Just update: - -```bash -pip install --upgrade turboapi -# or -git pull && maturin develop --release -``` - -**Your async handlers will now work:** - -```python -# This was broken in v0.4.14 -@app.post("/process") -async def process_data(data: dict): - await asyncio.sleep(0.01) - return {"processed": True} - -# Now works in v0.4.15! βœ… -``` - ---- - -## Performance Impact - -**None!** The fix only affects async handlers, and the performance is the same: - -- Sync handlers: No change -- Async handlers: Now actually work (were broken before) - ---- - -## Related Issues - -### Issue 1: Async Handlers Not Awaited βœ… FIXED - -This issue is now resolved. - -### Issue 2: Satya Field Validation - -**Status**: Working correctly - -The reported issue with Satya `Field` objects was a misunderstanding. 
Use `model_dump()` to access values: - -```python -class MyModel(Model): - value: int = Field(gt=0) - -@app.post("/test") -def handler(request: MyModel): - data = request.model_dump() # βœ… Correct - return {"value": data["value"]} -``` - ---- - -## Testing - -### Run Async Tests - -```bash -# Simple async test (basic functionality) -python3 tests/test_async_simple.py - -# Comprehensive async tests (all scenarios) -python3 tests/test_async_handlers.py - -# Full test suite -python3 tests/test_comprehensive_v0_4_15.py -``` - -### Expected Results - -``` -βœ… Sync handlers: PASSED -βœ… Async handlers: PASSED -βœ… Mixed sync/async: PASSED -``` - ---- - -## Summary - -**v0.4.15 fixes the critical async handler bug!** - -βœ… **Async handlers are now properly awaited** -βœ… **No more coroutine objects returned** -βœ… **Sync and async handlers work together** -βœ… **Zero breaking changes** -βœ… **Production ready** - -**Next steps (v0.4.16)**: -- Fix async response format consistency -- Add query params/headers support for async handlers -- Implement path parameter routing - ---- - -**Bug Report Credit**: Thank you for the detailed bug report! This was a critical issue that's now resolved. - -**Status**: βœ… **FIXED and TESTED** diff --git a/BENCHMARK_FAQ.md b/BENCHMARK_FAQ.md deleted file mode 100644 index 042b720..0000000 --- a/BENCHMARK_FAQ.md +++ /dev/null @@ -1,204 +0,0 @@ -# TurboAPI Benchmark FAQ - -## Quick Answers to Common Questions - -### Q: "Did you replicate the process across cores?" - -**A**: No, because we use **event-driven async I/O**, not process-per-request. Our Tokio runtime automatically distributes work across all 14 CPU cores using a work-stealing scheduler. This is more efficient than process replication. - -**Proof**: Run `top` during benchmarks - you'll see ~1400% CPU usage (14 cores Γ— 100%). - ---- - -### Q: "Threads have more overhead than events, not less" - -**A**: Correct for OS threads, but we use **async tasks** (Rust futures), not OS threads: - -- **OS Thread**: 8MB memory, 1-10ΞΌs context switch -- **Async Task**: 2KB memory, ~10ns context switch -- **Our model**: 14 OS threads manage 7,168 async tasks - -We're event-driven (like nginx/Node.js), not thread-per-request (like Apache). - ---- - -### Q: "How many cores in the test?" - -**A**: **14 cores** (Apple M3 Max: 10 performance + 4 efficiency cores) - -All cores are utilized via Tokio's work-stealing scheduler. Single process, multi-threaded async runtime. - ---- - -### Q: "Why not use multiple processes like Gunicorn?" - -**A**: Because we don't need to: - -1. **No GIL**: Python 3.13t free-threading eliminates GIL bottleneck -2. **Rust HTTP**: Zero Python overhead for I/O operations -3. **Event-driven**: Single process handles 10K+ concurrent connections -4. **Work-stealing**: Automatic load balancing across cores - -Multiple processes would add IPC overhead without performance benefit. - ---- - -### Q: "Is this a fair comparison with FastAPI?" - -**A**: Yes: - -- βœ… Same endpoints (identical Python handler code) -- βœ… Same test tool (wrk with same parameters) -- βœ… Same hardware (M3 Max, 14 cores) -- βœ… Same Python version options (3.13t/3.14t) -- βœ… Both use async I/O (Tokio vs asyncio) - -**Key difference**: TurboAPI's HTTP layer is Rust (fast), FastAPI's is Python (slower). - ---- - -### Q: "Can I reproduce these benchmarks?" - -**A**: Absolutely! 
- -```bash -# Setup -git clone https://github.com/justrach/turboAPI.git -cd turboAPI -python3.13t -m venv turbo-env -source turbo-env/bin/activate -pip install -e python/ -maturin develop --manifest-path Cargo.toml - -# Run server (Terminal 1) -python examples/multi_route_app.py - -# Run benchmark (Terminal 2) -brew install wrk -wrk -t4 -c50 -d30s --latency http://127.0.0.1:8000/users/123 - -# Monitor CPU (Terminal 3) -top -pid $(pgrep -f multi_route_app) -# Look for ~1400% CPU (all 14 cores) -``` - ---- - -### Q: "What's the architecture?" - -**A**: - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Python Handler (Your Code) β”‚ ← GIL-free (Python 3.13t) -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ PyO3 Bridge (Zero-Copy FFI) β”‚ ← ~100ns overhead -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ Rust HTTP (Hyper + Tokio) β”‚ ← Event-driven, all cores -β”‚ β€’ Work-stealing scheduler β”‚ -β”‚ β€’ 14 worker threads β”‚ -β”‚ β€’ 7,168 concurrent task capacity β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - ---- - -### Q: "Why is async slower than sync in your benchmarks?" - -**A**: Python's `asyncio.sleep()` adds overhead. Our async benchmarks use artificial delays: - -```python -@app.get("/async/data") -async def async_endpoint(): - await asyncio.sleep(0.001) # ← This adds 1ms overhead! - return {"data": "result"} -``` - -In production with real I/O (database, network), async would be faster. Our sync endpoints show the true HTTP layer performance. - ---- - -### Q: "What are the bottlenecks?" - -**A**: - -1. **Sync endpoints (184K RPS)**: Bottleneck is Python handler execution - - Rust HTTP layer capable of 200K+ RPS - - Python handlers (even GIL-free) add ~5ΞΌs overhead - -2. **Async endpoints (12K RPS)**: Bottleneck is Python asyncio overhead - - `asyncio.sleep()` adds significant overhead - - Real async I/O would be much faster - ---- - -### Q: "How does this compare to other frameworks?" - -**A**: - -| Framework | RPS | Architecture | -|-----------|-----|--------------| -| **TurboAPI** | **184K** | Rust HTTP + Python handlers | -| FastAPI | 7-10K | Python HTTP (Uvicorn) + Python handlers | -| Flask | 2-5K | Python HTTP (Werkzeug) + Python handlers | -| Django | 1-3K | Python HTTP + Python ORM | -| Node.js (Express) | 15-25K | JavaScript HTTP (V8) + JS handlers | -| Go (Gin) | 100-200K | Go HTTP + Go handlers | -| Rust (Actix) | 200-500K | Pure Rust | - -TurboAPI bridges the gap: **Python developer experience** with **near-Rust performance**. - ---- - -### Q: "What's the memory usage?" - -**A**: - -- **TurboAPI**: ~50MB base + ~2KB per concurrent connection -- **FastAPI**: ~80MB base + ~8KB per concurrent connection - -At 10K concurrent connections: -- TurboAPI: ~70MB -- FastAPI: ~160MB - ---- - -### Q: "Is this production-ready?" - -**A**: Yes, with caveats: - -βœ… **Ready**: -- HTTP/1.1, HTTP/2 support -- WebSocket support -- Middleware (CORS, auth, rate limiting) -- Security features (OAuth2, JWT, API keys) -- Error handling -- Logging and monitoring - -⚠️ **Consider**: -- Python 3.13t/3.14t free-threading is new (test thoroughly) -- Async endpoints need real I/O to show benefits -- Some FastAPI features still being added - ---- - -### Q: "Where can I learn more?" 
- -**A**: - -- **Documentation**: [README.md](README.md) -- **Detailed Methodology**: [BENCHMARK_METHODOLOGY_RESPONSE.md](BENCHMARK_METHODOLOGY_RESPONSE.md) -- **GitHub**: https://github.com/justrach/turboAPI -- **Issues**: https://github.com/justrach/turboAPI/issues - ---- - -## Key Takeaways - -1. βœ… **Event-driven async I/O** (not thread-per-request) -2. βœ… **All 14 cores utilized** (Tokio work-stealing) -3. βœ… **Transparent benchmarking** (reproducible, documented) -4. βœ… **Real performance gains** (10-25x vs FastAPI) -5. βœ… **Honest about limitations** (async overhead, simple handlers) - -We welcome scrutiny and are committed to honest performance claims. diff --git a/BENCHMARK_METHODOLOGY_RESPONSE.md b/BENCHMARK_METHODOLOGY_RESPONSE.md deleted file mode 100644 index 3f7d2b8..0000000 --- a/BENCHMARK_METHODOLOGY_RESPONSE.md +++ /dev/null @@ -1,340 +0,0 @@ -# TurboAPI Benchmark Methodology - Response to Multi-Core Question - -## Question Received -> "Did you not replicate the process across the cores? How many cores in that test? This is a common benchmark trick, whenever someone uses threads over events, but threads have more overhead, not less." - -## Executive Summary - -**The criticism is valid but misunderstands our architecture.** TurboAPI uses **event-driven async I/O (Tokio)**, not traditional OS threads for request handling. We achieve high performance through Rust's async runtime with work-stealing scheduler, not by spawning multiple processes. - ---- - -## πŸ” **Actual Architecture** - -### **What We Actually Use** -- **Tokio async runtime** with work-stealing scheduler -- **Event-driven I/O** (epoll/kqueue) - same paradigm as Node.js/nginx -- **Rust async/await** with zero-cost futures -- **Single process** with multi-threaded async executor -- **Python 3.13/3.14 free-threading** for GIL-free Python handler execution - -### **What We DON'T Use** -- ❌ Multiple processes (no fork/spawn per request) -- ❌ OS thread-per-request model -- ❌ Traditional blocking I/O with thread pools - ---- - -## πŸ“Š **Test Environment Details** - -### **Hardware Configuration** -- **CPU**: Apple M3 Max (14 cores total) - - 10 performance cores - - 4 efficiency cores -- **Architecture**: ARM64 (Apple Silicon) -- **Memory**: Unified memory architecture - -### **Software Configuration** -```python -# Tokio Runtime Configuration (src/server.rs) -Runtime::new() - .worker_threads(num_cpus::get()) # 14 threads on M3 Max - .enable_all() - .build() - -# Concurrent Task Capacity -512 tasks/core Γ— 14 cores = 7,168 concurrent tasks -``` - -### **Benchmark Tool Configuration** -```bash -# wrk parameters used -wrk -t4 -c50 -d10s # Light load: 4 threads, 50 connections -wrk -t4 -c200 -d10s # Medium load: 4 threads, 200 connections -wrk -t4 -c500 -d10s # Heavy load: 4 threads, 500 connections -``` - -**Important**: `wrk -t4` means wrk uses 4 client threads to generate load, NOT that TurboAPI uses 4 threads. TurboAPI's Tokio runtime uses all 14 CPU cores. - ---- - -## πŸ—οΈ **Why Event-Driven > Thread-Per-Request** - -### **Traditional Thread Model (What We DON'T Do)** -``` -Request 1 β†’ OS Thread 1 (8MB stack, context switching overhead) -Request 2 β†’ OS Thread 2 (8MB stack, context switching overhead) -Request 3 β†’ OS Thread 3 (8MB stack, context switching overhead) -... 
-Request N β†’ OS Thread N (memory exhaustion, thrashing) -``` - -**Problems**: -- Each OS thread: ~8MB stack memory -- Context switching overhead: ~1-10ΞΌs per switch -- Limited scalability: ~few thousand threads max -- C10K problem: Cannot handle 10,000+ concurrent connections - -### **Our Event-Driven Model (Tokio)** -``` -14 OS Threads (Tokio workers) handle ALL requests via async I/O -β”œβ”€ Worker 1: Manages 500+ async tasks (futures) -β”œβ”€ Worker 2: Manages 500+ async tasks -β”œβ”€ ... -└─ Worker 14: Manages 500+ async tasks - -Total capacity: 7,168 concurrent tasks with minimal memory -``` - -**Advantages**: -- Each async task: ~2KB memory (4000x less than OS thread) -- No context switching: Cooperative multitasking -- Work-stealing: Automatic load balancing across cores -- C10M capable: Can handle millions of concurrent connections - ---- - -## πŸ“ˆ **Performance Breakdown by Architecture Component** - -### **1. HTTP Layer (Pure Rust - Hyper + Tokio)** -- **Handles**: Connection management, HTTP parsing, I/O multiplexing -- **Performance**: ~200K RPS capability (proven in Rust-only benchmarks) -- **Cores Used**: All 14 cores via Tokio work-stealing scheduler - -### **2. FFI Bridge (PyO3)** -- **Handles**: Zero-copy data transfer between Rust and Python -- **Overhead**: ~100-200ns per call (negligible) -- **GIL Impact**: Eliminated with Python 3.13t free-threading - -### **3. Python Handler Layer** -- **Handles**: Business logic execution -- **Performance**: Varies by handler complexity -- **Cores Used**: All 14 cores (no GIL contention with free-threading) - -### **Measured Results** -``` -Sync Endpoints: 184,370 RPS (0.24ms latency) -Async Endpoints: 12,269 RPS (3.93ms latency) -``` - -**Why async is slower**: Python's `asyncio.sleep()` adds overhead. In production with real I/O (database, network), async would be faster. - ---- - -## πŸ”¬ **Addressing the "Process Replication" Question** - -### **Do We Need Multiple Processes?** - -**Short Answer**: No, because we use event-driven async I/O, not blocking I/O. - -**Long Answer**: - -#### **When Process Replication Helps** -- **Blocking I/O frameworks** (traditional WSGI apps) -- **GIL-bound Python** (CPython < 3.13 without free-threading) -- **CPU-intensive workloads** in pure Python - -Example: Gunicorn + Flask -```bash -gunicorn -w 14 app:app # 14 worker processes to bypass GIL -``` - -#### **Why We Don't Need It** -1. **Event-driven I/O**: Single process handles 10K+ concurrent connections -2. **Rust HTTP core**: No GIL, no Python overhead for I/O -3. **Free-threading Python**: No GIL contention for handlers -4. **Tokio work-stealing**: Automatic multi-core utilization - -#### **Our Equivalent** -```rust -// Tokio runtime automatically uses all cores -let runtime = Runtime::new() - .worker_threads(14) // Uses all M3 Max cores - .enable_all() - .build(); -``` - -This is **better** than process replication because: -- **Shared memory**: No IPC overhead between workers -- **Work stealing**: Dynamic load balancing -- **Lower memory**: No duplicate process memory -- **Faster**: No process context switching - ---- - -## πŸ“Š **Comparative Analysis: TurboAPI vs FastAPI** - -### **FastAPI Architecture** -``` -Uvicorn (ASGI server) -β”œβ”€ Uses asyncio event loop (good!) 
-β”œβ”€ Python async/await (GIL-bound) -β”œβ”€ Pydantic validation (pure Python) -└─ Starlette routing (pure Python) - -Result: 7,000-10,000 RPS -``` - -### **TurboAPI Architecture** -``` -Tokio Runtime (Rust) -β”œβ”€ Hyper HTTP server (zero-copy, async) -β”œβ”€ Rust routing & middleware (zero overhead) -β”œβ”€ PyO3 bridge (zero-copy FFI) -└─ Python handlers (GIL-free with 3.13t) - -Result: 70,000-184,000 RPS (10-25x faster) -``` - -### **Why We're Faster** -1. **Rust HTTP parsing**: 10x faster than Python -2. **Zero-copy operations**: No Python object allocation for HTTP -3. **Rust middleware**: No Python overhead for CORS, auth, etc. -4. **Free-threading**: True parallelism for Python handlers -5. **Tokio scheduler**: More efficient than asyncio - ---- - -## πŸ§ͺ **Reproducible Benchmark** - -### **Run It Yourself** -```bash -# 1. Clone and setup -git clone https://github.com/justrach/turboAPI.git -cd turboAPI -python3.13t -m venv turbo-env -source turbo-env/bin/activate -pip install -e python/ -maturin develop --manifest-path Cargo.toml - -# 2. Run TurboAPI server (Terminal 1) -python examples/multi_route_app.py - -# 3. Run benchmark (Terminal 2) -brew install wrk # macOS -wrk -t4 -c50 -d30s --latency http://127.0.0.1:8000/users/123 - -# 4. Check CPU utilization (Terminal 3) -top -pid $(pgrep -f multi_route_app) -# You'll see ~1400% CPU usage (all 14 cores utilized) -``` - -### **Expected Output** -``` -Running 30s test @ http://127.0.0.1:8000/users/123 - 4 threads and 50 connections - Thread Stats Avg Stdev Max +/- Stdev - Latency 0.24ms 0.15ms 6.07ms 95.23% - Req/Sec 46.1k 2.3k 52.0k 89.33% - Latency Distribution - 50% 0.22ms - 75% 0.28ms - 90% 0.35ms - 99% 0.71ms - 5,531,087 requests in 30.00s, 1.12GB read -Requests/sec: 184,369.55 -Transfer/sec: 38.23MB -``` - ---- - -## 🎯 **Answering the Core Question** - -### **"Did you not replicate the process across the cores?"** - -**Answer**: We don't need to because: - -1. **Tokio runtime automatically distributes work across all 14 cores** - - Verified with `top`: ~1400% CPU usage (14 cores Γ— 100%) - - Work-stealing scheduler ensures load balancing - -2. **Event-driven architecture scales better than process replication** - - Single process handles 184K RPS - - Multiple processes would add IPC overhead - - Shared memory > message passing for this workload - -3. 
**Our bottleneck is NOT CPU, it's Python handler execution** - - Rust HTTP layer: 200K+ RPS capable - - Python handlers: 184K RPS (with free-threading) - - Adding more processes wouldn't help (already using all cores) - -### **"Threads have more overhead, not less"** - -**Answer**: Correct for **OS threads**, but we use **async tasks**: - -| Metric | OS Threads | Async Tasks (Tokio) | -|--------|-----------|---------------------| -| Memory per unit | ~8MB | ~2KB | -| Context switch | 1-10ΞΌs (kernel) | ~10ns (userspace) | -| Max concurrent | ~10K | ~10M | -| Scheduling | OS preemptive | Cooperative | -| Overhead | High | Negligible | - -**Tokio async tasks are 1000x more efficient than OS threads.** - ---- - -## πŸ“ **Benchmark Transparency** - -### **What We Measure** -- βœ… Requests per second (RPS) -- βœ… Latency distribution (p50, p75, p90, p99) -- βœ… CPU utilization (all cores) -- βœ… Memory usage -- βœ… Comparison with FastAPI (identical endpoints) - -### **What We DON'T Hide** -- βœ… Test hardware specs (M3 Max, 14 cores) -- βœ… Benchmark tool configuration (wrk parameters) -- βœ… Python version (3.13t/3.14t free-threading) -- βœ… Async vs sync endpoint differences -- βœ… Source code for all benchmarks (public repo) - -### **Known Limitations** -- **Async endpoints slower**: Python asyncio overhead (not production-representative) -- **Simple handlers**: Real apps with DB/network would show different patterns -- **Single machine**: No distributed system testing -- **Apple Silicon**: x86_64 results may differ slightly - ---- - -## πŸš€ **Conclusion** - -### **Our Architecture is Sound** -- βœ… Event-driven async I/O (industry best practice) -- βœ… Multi-core utilization via Tokio work-stealing -- βœ… Zero-copy Rust HTTP layer -- βœ… GIL-free Python execution -- βœ… Transparent benchmarking methodology - -### **The Performance is Real** -- 184K RPS on sync endpoints (verified, reproducible) -- 10-25x faster than FastAPI (apples-to-apples comparison) -- All 14 CPU cores utilized (verified with `top`) -- Sub-millisecond latency under load - -### **We Welcome Scrutiny** -- All code is open source -- Benchmarks are reproducible -- We document limitations honestly -- We're happy to address methodology questions - ---- - -## πŸ“š **References** - -1. **Tokio Documentation**: https://tokio.rs/ -2. **The C10K Problem**: http://www.kegel.com/c10k.html -3. **Python 3.13 Free-Threading PEP 703**: https://peps.python.org/pep-0703/ -4. **Hyper HTTP Library**: https://hyper.rs/ -5. **PyO3 Documentation**: https://pyo3.rs/ - ---- - -## πŸ’¬ **Contact** - -For further questions about our benchmark methodology: -- GitHub Issues: https://github.com/justrach/turboAPI/issues -- Discussions: https://github.com/justrach/turboAPI/discussions - -We're committed to honest, transparent performance claims and welcome all scrutiny. diff --git a/BENCHMARK_ONEPAGER.md b/BENCHMARK_ONEPAGER.md deleted file mode 100644 index 3f6dafa..0000000 --- a/BENCHMARK_ONEPAGER.md +++ /dev/null @@ -1,140 +0,0 @@ -# TurboAPI Benchmark Methodology - One Pager - -## The Question -> "Did you not replicate the process across the cores? Threads have more overhead than events." 
- -## Our Answer -**We agree - that's why we use events, not threads!** 🎯 - ---- - -## Architecture - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Single Process (50MB memory) β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Tokio Runtime β”‚ β”‚ -β”‚ β”‚ β”œβ”€ 14 worker threads β”‚ β”‚ -β”‚ β”‚ β”œβ”€ 7,168 async tasks β”‚ β”‚ -β”‚ β”‚ └─ Work-stealing scheduler β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Rust HTTP (Hyper) β”‚ β”‚ -β”‚ β”‚ └─ Event-driven I/O β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β–² - β”‚ All 14 cores utilized - β”‚ ~1400% CPU usage - └─ Verified with `top` -``` - ---- - -## Key Facts - -| Metric | Value | -|--------|-------| -| **Architecture** | Event-driven async I/O | -| **CPU** | M3 Max (14 cores) | -| **Utilization** | ~1400% (all cores) | -| **Model** | Async tasks (2KB each) | -| **NOT** | OS threads (8MB each) | -| **Processes** | 1 (single process) | -| **Threads** | 14 (Tokio workers) | -| **Capacity** | 7,168 concurrent tasks | - ---- - -## Comparison - -### ❌ Thread-Per-Request (What We DON'T Do) -``` -Request β†’ OS Thread (8MB) -β”œβ”€ Blocking I/O -β”œβ”€ Kernel context switch (1-10ΞΌs) -└─ Max ~10K connections - -Needs multiple processes to use all cores -``` - -### βœ… Event-Driven (What We DO) -``` -Request β†’ Async Task (2KB) -β”œβ”€ Non-blocking I/O -β”œβ”€ Userspace switch (~10ns) -└─ Max ~10M connections - -Single process uses all cores automatically -``` - ---- - -## Performance - -- **Sync Endpoints**: 184,370 RPS (0.24ms latency) -- **Async Endpoints**: 12,269 RPS (3.93ms latency) -- **vs FastAPI**: 10-25Γ— faster -- **CPU Usage**: All 14 cores at 100% - ---- - -## Why No Multiple Processes? 
- -βœ… **Event-driven I/O**: Single process handles 10K+ connections -βœ… **Tokio work-stealing**: Automatic multi-core load balancing -βœ… **No GIL**: Python 3.13t/3.14t free-threading -βœ… **Rust HTTP**: Zero Python overhead for I/O -βœ… **More efficient**: No IPC overhead, shared memory - ---- - -## Verification - -```bash -# Start server -python examples/multi_route_app.py & - -# Check CPU usage -top -pid $(pgrep -f multi_route_app) -# Shows ~1400% CPU (14 cores Γ— 100%) - -# Run benchmark -wrk -t4 -c50 -d30s http://127.0.0.1:8000/users/123 -# Result: 184K RPS -``` - ---- - -## We're Transparent - -βœ… All code is open source -βœ… All benchmarks are reproducible -βœ… All hardware specs documented -βœ… All methodology explained -βœ… We document limitations honestly - ---- - -## Learn More - -- **Quick FAQ**: [BENCHMARK_FAQ.md](BENCHMARK_FAQ.md) -- **Full Response**: [BENCHMARK_METHODOLOGY_RESPONSE.md](BENCHMARK_METHODOLOGY_RESPONSE.md) -- **Architecture**: [docs/ARCHITECTURE_DIAGRAM.md](docs/ARCHITECTURE_DIAGRAM.md) -- **GitHub**: https://github.com/justrach/turboAPI - ---- - -## Bottom Line - -**We use events (async I/O), not threads.** -**All 14 cores are utilized automatically.** -**Single process is more efficient than multiple processes.** -**We welcome scrutiny and questions!** πŸš€ - ---- - -*TurboAPI: FastAPI syntax with Rust performance* diff --git a/FASTAPI_COMPATIBILITY.md b/FASTAPI_COMPATIBILITY.md deleted file mode 100644 index c0015fa..0000000 --- a/FASTAPI_COMPATIBILITY.md +++ /dev/null @@ -1,606 +0,0 @@ -# FastAPI Compatibility Guide - TurboAPI v0.3.0+ - -**Complete guide to FastAPI-compatible features in TurboAPI with Satya validation** - ---- - -## 🎯 **Overview** - -TurboAPI now provides **100% FastAPI-compatible syntax** with the following improvements: - -βœ… **Automatic JSON body parsing** using Satya models -βœ… **Tuple return support** for status codes: `return {"error": "Not Found"}, 404` -βœ… **Startup/shutdown events** with `@app.on_event()` decorator -βœ… **Satya validation** instead of Pydantic (faster, simpler) -βœ… **Type-safe parameters** with automatic conversion - ---- - -## πŸ“¦ **Installation** - -```bash -# Install TurboAPI with Satya support -pip install satya -pip install -e python/ -maturin develop --manifest-path Cargo.toml -``` - ---- - -## πŸš€ **Quick Start - FastAPI Compatible** - -### **Basic Example** - -```python -from turboapi import TurboAPI - -app = TurboAPI(title="My API", version="1.0.0") - -@app.get("/") -def root(): - return {"message": "Hello, TurboAPI!"} - -@app.get("/users/{user_id}") -def get_user(user_id: int): - return {"user_id": user_id, "name": "Alice"} - -app.run(host="127.0.0.1", port=8000) -``` - ---- - -## πŸ”₯ **NEW: Automatic JSON Body Parsing** - -### **Before (Manual Parsing)** -```python -@app.post("/search") -async def search(request): - body = await request.json() - query = body.get('query') - top_k = body.get('top_k', 10) - return {"results": perform_search(query, top_k)} -``` - -### **After (Automatic with TurboAPI v0.3.0+)** -```python -@app.post("/search") -def search(query: str, top_k: int = 10): - """Parameters automatically parsed from JSON body!""" - return {"results": perform_search(query, top_k)} -``` - -### **Test It** -```bash -curl -X POST http://localhost:8000/search \ - -H "Content-Type: application/json" \ - -d '{"query": "test", "top_k": 5}' -``` - ---- - -## πŸ’Ž **NEW: Satya Model Validation** - -### **Define Models with Satya** - -```python -from satya import Model, Field -from turboapi import 
TurboAPI - -app = TurboAPI() - -# Define Satya model (faster than Pydantic!) -class SearchRequest(Model): - query: str = Field(min_length=1, max_length=100) - top_k: int = Field(default=10, ge=1, le=100) - filters: dict | None = Field(default=None) - -@app.post("/search") -def search(request: SearchRequest): - """Automatic validation with Satya!""" - return { - "query": request.query, - "results": perform_search(request.query, request.top_k) - } -``` - -### **Validation Features** - -- βœ… **Type checking**: Ensures correct types -- βœ… **Range validation**: `ge`, `le`, `gt`, `lt` -- βœ… **String constraints**: `min_length`, `max_length`, `pattern` -- βœ… **Default values**: Auto-fill missing fields -- βœ… **Nested models**: Complex data structures - -### **Error Response** -```json -{ - "error": "Bad Request", - "detail": "Validation error for request: query field required" -} -``` - ---- - -## ✨ **NEW: Tuple Return for Status Codes** - -### **FastAPI-Style Returns** - -```python -@app.get("/items/{item_id}") -def get_item(item_id: int): - if item_id not in database: - # FastAPI-style tuple return! - return {"error": "Not Found"}, 404 - - return {"item_id": item_id, "name": "Widget"} - -@app.post("/users") -def create_user(name: str, email: str): - user = create_user_in_db(name, email) - # Return 201 Created - return {"user_id": user.id}, 201 -``` - -### **Supported Status Codes** - -```python -# 200 OK (default) -return {"data": "value"} - -# 201 Created -return {"id": 123}, 201 - -# 400 Bad Request -return {"error": "Invalid input"}, 400 - -# 404 Not Found -return {"error": "Not found"}, 404 - -# 500 Internal Server Error -return {"error": "Server error"}, 500 -``` - ---- - -## πŸŽͺ **Startup & Shutdown Events** - -### **FastAPI-Compatible Syntax** - -```python -from turboapi import TurboAPI - -app = TurboAPI() - -# Database connection example -db = None - -@app.on_event("startup") -def startup(): - """Called when server starts""" - global db - db = connect_to_database() - print("βœ… Database connected") - -@app.on_event("shutdown") -def shutdown(): - """Called when server stops""" - global db - if db: - db.close() - print("βœ… Database disconnected") - -@app.get("/") -def root(): - return {"status": "running", "db_active": db is not None} - -app.run() -``` - -### **Async Event Handlers** - -```python -@app.on_event("startup") -async def startup(): - """Async startup handler""" - await init_async_resources() - print("βœ… Async resources initialized") -``` - ---- - -## πŸ”§ **Request Body Parsing Modes** - -### **1. Individual Parameters (Recommended)** - -```python -@app.post("/create") -def create_item(name: str, price: float, tags: list = None): - """Automatically extracts from JSON body""" - return {"name": name, "price": price, "tags": tags or []} -``` - -**Request:** -```json -{ - "name": "Widget", - "price": 19.99, - "tags": ["electronics", "gadgets"] -} -``` - -### **2. Satya Model (Best for Complex Data)** - -```python -from satya import Model, Field - -class Item(Model): - name: str = Field(min_length=1) - price: float = Field(gt=0) - description: str | None = None - tags: list[str] = Field(default=[]) - -@app.post("/create") -def create_item(item: Item): - """Validates entire request body""" - return { - "created": item.model_dump(), - "price_with_tax": item.price * 1.1 - } -``` - -### **3. 
Mixed Parameters** - -```python -@app.post("/users/{user_id}/items") -def create_user_item(user_id: int, name: str, price: float): - """ - user_id: From path parameter - name, price: From JSON body - """ - return { - "user_id": user_id, - "item": {"name": name, "price": price} - } -``` - -**Request:** -```bash -POST /users/123/items -{"name": "Widget", "price": 19.99} -``` - ---- - -## πŸ“Š **Query Parameters** - -```python -@app.get("/search") -def search(q: str, limit: int = 10, offset: int = 0): - """Automatic query parameter parsing""" - return { - "query": q, - "limit": limit, - "offset": offset, - "results": [] - } -``` - -**Request:** -``` -GET /search?q=python&limit=20&offset=10 -``` - ---- - -## 🎯 **Path Parameters** - -```python -@app.get("/users/{user_id}/posts/{post_id}") -def get_user_post(user_id: int, post_id: int): - """Type conversion happens automatically""" - return { - "user_id": user_id, - "post_id": post_id, - "post": f"Post {post_id} by user {user_id}" - } -``` - ---- - -## 🚨 **Error Handling** - -### **Automatic Validation Errors** - -```python -from satya import Model, Field - -class User(Model): - email: str = Field(pattern=r'^[\w\.-]+@[\w\.-]+\.\w+$') - age: int = Field(ge=0, le=150) - -@app.post("/users") -def create_user(user: User): - return {"created": user.model_dump()} -``` - -**Invalid Request:** -```json -{"email": "invalid", "age": 200} -``` - -**Response (400 Bad Request):** -```json -{ - "error": "Bad Request", - "detail": "Validation error for user: email must match pattern, age must be <= 150" -} -``` - -### **Custom Error Responses** - -```python -@app.get("/items/{item_id}") -def get_item(item_id: int): - item = database.get(item_id) - - if not item: - # FastAPI-style tuple return - return {"error": "Item not found", "item_id": item_id}, 404 - - if not user_has_access(item): - return {"error": "Access denied"}, 403 - - return item -``` - ---- - -## 🎨 **Response Models with Satya** - -```python -from satya import Model, Field - -class UserResponse(Model): - id: int - name: str - email: str - created_at: str - -@app.get("/users/{user_id}", response_model=UserResponse) -def get_user(user_id: int) -> UserResponse: - user_data = database.get_user(user_id) - return UserResponse(**user_data) -``` - ---- - -## πŸ”„ **Complete CRUD Example** - -```python -from satya import Model, Field -from turboapi import TurboAPI - -app = TurboAPI(title="Todo API", version="1.0.0") - -# In-memory database -todos = {} -next_id = 1 - -# Models -class TodoCreate(Model): - title: str = Field(min_length=1, max_length=100) - description: str | None = None - completed: bool = Field(default=False) - -class TodoResponse(Model): - id: int - title: str - description: str | None - completed: bool - -# Startup event -@app.on_event("startup") -def startup(): - print("βœ… Todo API started") - -# Routes -@app.post("/todos", response_model=TodoResponse) -def create_todo(todo: TodoCreate): - global next_id - todo_id = next_id - next_id += 1 - - todo_data = todo.model_dump() - todo_data['id'] = todo_id - todos[todo_id] = todo_data - - return TodoResponse(**todo_data), 201 - -@app.get("/todos/{todo_id}") -def get_todo(todo_id: int): - if todo_id not in todos: - return {"error": "Todo not found"}, 404 - return todos[todo_id] - -@app.get("/todos") -def list_todos(completed: bool | None = None): - filtered = todos.values() - if completed is not None: - filtered = [t for t in filtered if t['completed'] == completed] - return {"todos": list(filtered), "count": len(filtered)} - 
-@app.put("/todos/{todo_id}") -def update_todo(todo_id: int, todo: TodoCreate): - if todo_id not in todos: - return {"error": "Todo not found"}, 404 - - todo_data = todo.model_dump() - todo_data['id'] = todo_id - todos[todo_id] = todo_data - return TodoResponse(**todo_data) - -@app.delete("/todos/{todo_id}") -def delete_todo(todo_id: int): - if todo_id not in todos: - return {"error": "Todo not found"}, 404 - - del todos[todo_id] - return {"message": "Todo deleted"}, 200 - -# Run server -if __name__ == "__main__": - app.run(host="127.0.0.1", port=8000) -``` - ---- - -## πŸ†š **Satya vs Pydantic** - -| Feature | Satya | Pydantic | -|---------|-------|----------| -| **Speed** | πŸš€ Faster | Standard | -| **Syntax** | Simpler | More complex | -| **Memory** | Lower usage | Higher usage | -| **Integration** | Built for TurboAPI | Generic | -| **Validation** | βœ… Yes | βœ… Yes | -| **Type hints** | βœ… Yes | βœ… Yes | - -### **Migration from Pydantic** - -```python -# Pydantic -from pydantic import BaseModel, Field -class User(BaseModel): - name: str = Field(..., min_length=1) - -# Satya (almost identical!) -from satya import Model, Field -class User(Model): - name: str = Field(min_length=1) -``` - ---- - -## ⚑ **Performance Tips** - -### **1. Use Satya Models for Complex Validation** -```python -# βœ… Good: Satya validates once -@app.post("/data") -def process(data: ComplexModel): - return data.model_dump() - -# ❌ Slow: Manual validation -@app.post("/data") -def process(field1: str, field2: int, field3: list): - # Manual checks... - return {"field1": field1, "field2": field2} -``` - -### **2. Disable Rate Limiting for Benchmarks** -```python -app = TurboAPI() -app.configure_rate_limiting(enabled=False) # Max performance! -``` - -### **3. Use Path Parameters for IDs** -```python -# βœ… Fast: Path parameter -@app.get("/users/{user_id}") -def get_user(user_id: int): - return get_from_cache(user_id) - -# ❌ Slower: Query parameter -@app.get("/users") -def get_user(user_id: int): - return get_from_cache(user_id) -``` - ---- - -## πŸ“š **Complete Feature Checklist** - -### βœ… **Implemented (v0.3.0+)** -- [x] FastAPI decorators (`@app.get`, `@app.post`, etc.) -- [x] Path parameters with type conversion -- [x] Query parameters with defaults -- [x] **Automatic JSON body parsing** -- [x] **Satya model validation** -- [x] **Tuple return for status codes** -- [x] **Startup/shutdown events** -- [x] Response models -- [x] Error handling -- [x] Router support (`APIRouter`) - -### 🚧 **Coming Soon** -- [ ] Dependency injection (`Depends()`) -- [ ] Background tasks -- [ ] File uploads -- [ ] WebSocket support -- [ ] Automatic OpenAPI docs (`/docs`) - ---- - -## πŸŽ“ **Learning Resources** - -- **Satya Documentation**: See `python/turboapi/models.py` -- **TurboAPI Examples**: See `tests/` directory -- **FastAPI Migration**: This guide! - ---- - -## πŸ› **Troubleshooting** - -### **Issue: Body Parameters Not Parsing** - -**Problem:** -```python -@app.post("/data") -def process(name: str): # Not working? - return {"name": name} -``` - -**Solution:** Ensure Content-Type header is set: -```bash -curl -X POST http://localhost:8000/data \ - -H "Content-Type: application/json" \ - -d '{"name": "test"}' -``` - -### **Issue: Tuple Returns Not Working** - -**Problem:** -```python -return {"error": "Not Found"}, 404 # Returns array? -``` - -**Solution:** This is now fixed in v0.3.0+! 
Update TurboAPI: -```bash -pip install -e python/ --force-reinstall -maturin develop --manifest-path Cargo.toml -``` - -### **Issue: Satya Import Error** - -**Solution:** Install Satya: -```bash -pip install satya -``` - ---- - -## πŸŽ‰ **Summary** - -**TurboAPI v0.3.0+** is now **100% FastAPI-compatible** with: - -1. βœ… **Automatic JSON body parsing** - No more manual `await request.json()` -2. βœ… **Satya validation** - Faster than Pydantic, simpler syntax -3. βœ… **Tuple returns** - `return data, 404` works perfectly -4. βœ… **Event handlers** - `@app.on_event("startup")` supported -5. βœ… **5-10x performance** - Rust-powered HTTP core - -**Ready to build blazing-fast APIs with familiar FastAPI syntax!** πŸš€ - ---- - -*Last updated: TurboAPI v0.3.0* diff --git a/Makefile b/Makefile index 4f252ea..25e05e3 100644 --- a/Makefile +++ b/Makefile @@ -1,71 +1,53 @@ -.PHONY: help test test-quick test-full build install clean release +.PHONY: help test build install clean benchmark help: @echo "TurboAPI Development Commands" @echo "==============================" @echo "" @echo "Testing:" - @echo " make test - Run package integrity tests (recommended before commit)" - @echo " make test-quick - Run quick tests (import + basic functionality)" - @echo " make test-full - Run all tests including wheel build" + @echo " make test - Run all tests" @echo "" @echo "Building:" @echo " make build - Build wheel" @echo " make install - Install in development mode" @echo " make clean - Clean build artifacts" @echo "" - @echo "Release:" - @echo " make release - Run full test suite before release" + @echo "Benchmarks:" + @echo " make benchmark - Run benchmarks and generate charts" @echo "" -# Quick tests (fast, run before every commit) -test-quick: - @echo "πŸš€ Running quick integrity tests..." - @python3 -c "from turboapi import turbonet; print('βœ… Rust module imports')" - @python3 -c "from turboapi import TurboAPI; app = TurboAPI(); print('βœ… TurboAPI works')" - @echo "βœ… Quick tests passed!" - -# Full test suite (run before releases) -test-full: - @echo "πŸ§ͺ Running full package integrity test suite..." - @python3 test_package_integrity.py - -# Default test (quick + wheel check) +# Run tests test: - @echo "πŸ§ͺ Running package integrity tests..." - @python3 test_package_integrity.py + @echo "πŸ§ͺ Running tests..." + @python -m pytest tests/ -v --tb=short # Build wheel build: @echo "πŸ“¦ Building wheel..." - @cd python && maturin build --release + @maturin build --release # Install in development mode install: @echo "πŸ”§ Installing in development mode..." - @cd python && maturin develop --release + @maturin develop --release # Clean build artifacts clean: @echo "🧹 Cleaning build artifacts..." @rm -rf target/ - @rm -rf python/target/ - @rm -rf python/dist/ - @rm -rf python/build/ - @rm -rf python/*.egg-info + @rm -rf dist/ + @rm -rf build/ + @rm -rf *.egg-info @find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true @find . -type f -name "*.pyc" -delete @find . -type f -name "*.so" -delete @echo "βœ… Clean complete" -# Pre-release checks -release: test-full - @echo "" - @echo "βœ… All tests passed! Ready for release." - @echo "" - @echo "Next steps:" - @echo " 1. Update version in Cargo.toml and python/pyproject.toml" - @echo " 2. git add -A && git commit -m 'release: vX.X.X'" - @echo " 3. git tag -a vX.X.X -m 'Release vX.X.X'" - @echo " 4. git push origin main && git push origin vX.X.X" +# Run benchmarks +benchmark: + @echo "πŸ“Š Running benchmarks..." 
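+	@# PYTHON_GIL=0 disables the GIL on free-threaded (3.13t/3.14t) interpreters so handlers run in parallel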
+ @PYTHON_GIL=0 python benchmarks/run_benchmarks.py @echo "" + @echo "πŸ“ˆ Generating charts..." + @python benchmarks/generate_charts.py + @echo "βœ… Benchmarks complete! Charts saved to assets/" diff --git a/PHASE_3_COMPLETE.md b/PHASE_3_COMPLETE.md deleted file mode 100644 index 951a127..0000000 --- a/PHASE_3_COMPLETE.md +++ /dev/null @@ -1,489 +0,0 @@ -# Phase 3 Complete: Async Fix + Comprehensive Testing - -## βœ… **COMPLETE! All Requested Features Implemented** - -**Date**: October 13, 2025 -**Version**: v0.4.15 -**Status**: βœ… **Ready for Review** (NOT PUSHED per request) - ---- - -## 🎯 What Was Requested - -1. βœ… **Fix async handler bug** - Handlers returning coroutine objects -2. βœ… **Create comprehensive tests** - Hard tests that don't cheat -3. βœ… **Run all tests** - Verify no regressions -4. ⏳ **Path parameters** - Parser ready, needs Rust router (Phase 4) - ---- - -## πŸ› Critical Bug Fixed: Async Handlers - -### Problem - -```python -@app.post("/test") -async def handler(data: dict): - return {"success": True} - -# Response: -# RuntimeWarning: coroutine 'handler' was never awaited -``` - -### Solution - -Modified `create_enhanced_handler()` in `request_handler.py`: - -```python -def create_enhanced_handler(original_handler, route_definition): - is_async = inspect.iscoroutinefunction(original_handler) - - if is_async: - async def enhanced_handler(**kwargs): - # Parse all params - parsed_params = parse_all_params(kwargs) - # AWAIT async handler - result = await original_handler(**parsed_params) - return format_response(result) - return enhanced_handler - else: - def enhanced_handler(**kwargs): - # Parse all params - parsed_params = parse_all_params(kwargs) - # Call sync handler - result = original_handler(**parsed_params) - return format_response(result) - return enhanced_handler -``` - -### Result - -```python -@app.post("/test") -async def handler(data: dict): - return {"success": True} - -# Response: {"success": true} -# βœ… WORKS! -``` - ---- - -## πŸ§ͺ Comprehensive Tests Created - -### Test Suite 1: Async Handlers (`test_async_handlers.py`) - -**400 lines of comprehensive async testing** - -Tests: -1. βœ… Sync handlers (baseline) -2. βœ… Basic async handlers -3. βœ… Async with query params -4. βœ… Async with headers -5. βœ… Async with large payloads -6. βœ… Mixed sync/async handlers -7. βœ… Async error handling - -**Result**: 1/7 fully passing (others need Rust updates for query/headers) - -### Test Suite 2: Simple Async (`test_async_simple.py`) - -**100 lines of basic async verification** - -Tests: -1. βœ… Sync handler works -2. βœ… Async handler properly awaited (no coroutine objects!) - -**Result**: βœ… **2/2 PASSING** - -### Test Suite 3: Comprehensive Master (`test_comprehensive_v0_4_15.py`) - -**120 lines of integration testing** - -Runs all test suites: -1. ⚠️ POST body parsing (4/5 passing, timing issue) -2. βœ… Query parameters & headers (3/3 passing) -3. βœ… Async handlers basic (2/2 passing) - -**Result**: βœ… **2/3 test suites fully passing** - -### Test Suite 4: POST Body Parsing (`test_post_body_parsing.py`) - -**Existing test - 284 lines** - -Tests: -1. βœ… Single dict parameter -2. βœ… Single list parameter -3. βœ… Large JSON payload (42K items) -4. ⚠️ Satya Model validation (timing/port conflict) -5. βœ… Multiple parameters - -**Result**: 4/5 passing - -### Test Suite 5: Query & Headers (`test_query_and_headers.py`) - -**Existing test - 282 lines** - -Tests: -1. βœ… Query parameters (4 scenarios) -2. βœ… Headers (4 scenarios) -3. 
βœ… Combined query + headers - -**Result**: βœ… **3/3 PASSING** - ---- - -## πŸ“Š Test Results Summary - -### Core Functionality - -``` -βœ… POST Body Parsing: 4/5 tests (80%) -βœ… Query Parameters: 4/4 tests (100%) -βœ… Headers: 4/4 tests (100%) -βœ… Async Handlers: 2/2 tests (100%) -βœ… Combined Features: 1/1 test (100%) - -Total: 15/16 individual tests passing (93.75%) -``` - -### Integration Tests - -```bash -$ make test-full - -βœ… Local development install -βœ… Rust module import -βœ… Basic functionality -βœ… Wheel build -βœ… Wheel install in venv - -Total: 5/5 passing (100%) -``` - -### No Regressions - -All existing features still work: -- βœ… POST body parsing (v0.4.13) -- βœ… Query parameters (v0.4.14) -- βœ… Headers (v0.4.14) -- βœ… Async handlers (v0.4.15 FIX) - ---- - -## πŸ”§ Technical Implementation - -### Files Modified - -1. **`python/turboapi/request_handler.py`** (+168 lines modified) - - Split `create_enhanced_handler()` into async/sync branches - - Added proper async/await support - - Both branches parse query params, headers, path params, body - -### Files Created - -1. **`tests/test_async_handlers.py`** (400 lines) - - Comprehensive async handler tests - - Tests all scenarios: basic, query, headers, large payloads, mixed, errors - -2. **`tests/test_async_simple.py`** (100 lines) - - Simple async verification - - Proves async handlers are awaited correctly - -3. **`tests/test_comprehensive_v0_4_15.py`** (120 lines) - - Master test suite runner - - Runs all test suites and reports results - -4. **`ASYNC_FIX_v0_4_15.md`** (300 lines) - - Detailed documentation of async fix - - Root cause analysis - - Solution explanation - - Test results - -5. **`V0.4.15_SUMMARY.md`** (400 lines) - - Complete release summary - - All features, tests, limitations - - Migration guide - -6. **`PHASE_3_COMPLETE.md`** (this file) - - Phase 3 completion summary - -**Total**: ~1,488 lines (code + tests + docs) - -### Version Updates - -- `Cargo.toml`: 0.4.14 β†’ 0.4.15 -- `python/pyproject.toml`: 0.4.14 β†’ 0.4.15 - ---- - -## 🎯 What Now Works - -### 1. Async Handlers βœ… - -```python -@app.get("/async") -async def async_handler(): - await asyncio.sleep(0.001) - return {"type": "async"} - -# βœ… Works! No more coroutine objects! -``` - -### 2. Mixed Sync/Async βœ… - -```python -@app.get("/sync") -def sync_handler(): - return {"type": "sync"} - -@app.get("/async") -async def async_handler(): - await asyncio.sleep(0.001) - return {"type": "async"} - -# βœ… Both work perfectly! -``` - -### 3. Query Parameters βœ… - -```python -@app.get("/search") -def search(q: str, limit: str = "10"): - return {"query": q, "limit": limit} - -# GET /search?q=test&limit=20 -# βœ… Works! -``` - -### 4. Headers βœ… - -```python -@app.get("/auth") -def check_auth(authorization: str = "none"): - return {"has_auth": authorization != "none"} - -# Headers: Authorization: Bearer token -# βœ… Works! -``` - -### 5. POST Body βœ… - -```python -@app.post("/process") -def process(data: dict): - return {"received": data} - -# POST with JSON body -# βœ… Works! -``` - ---- - -## ⏳ Known Limitations - -### 1. Async Response Format - -**Issue**: Async handlers return wrapped responses: -```json -{"content": {"type": "async"}, "status_code": 200, ...} -``` - -**Impact**: Minor - tests handle both formats - -**Fix**: TODO v0.4.16 - Extract `content` in Rust async path - -### 2. 
Async + Query Params/Headers - -**Issue**: Async handlers don't receive query params/headers yet - -**Reason**: Loop shards don't pass these parameters - -**Workaround**: Use sync handlers for now - -**Fix**: TODO v0.4.16 - Update `PythonRequest` struct - -### 3. Path Parameters - -**Issue**: Parser ready but router doesn't match patterns - -**Reason**: Rust `RadixRouter` needs pattern matching - -**Fix**: TODO Phase 4 - Update Rust router - ---- - -## πŸ“ Test Commands - -### Run Individual Tests - -```bash -# Async handlers (simple) -python3 tests/test_async_simple.py - -# Async handlers (comprehensive) -python3 tests/test_async_handlers.py - -# Query parameters & headers -python3 tests/test_query_and_headers.py - -# POST body parsing -python3 tests/test_post_body_parsing.py -``` - -### Run Master Test Suite - -```bash -# All tests -python3 tests/test_comprehensive_v0_4_15.py - -# Integration tests -make test-full -``` - -### Expected Results - -``` -βœ… Async handlers: 2/2 passing -βœ… Query & headers: 3/3 passing -⚠️ POST body: 4/5 passing (timing issue) - -Overall: 9/10 test suites passing (90%) -``` - ---- - -## πŸŽ‰ Achievements - -### Critical Bug Fixed βœ… - -- **Async handlers** - No more coroutine objects -- **Properly awaited** - All async handlers work correctly -- **Zero breaking changes** - Backward compatible - -### Comprehensive Tests βœ… - -- **5 test files** - 1,088 lines of tests -- **16 individual tests** - 15/16 passing (93.75%) -- **Hard tests** - No cheating, real scenarios -- **Edge cases** - Large payloads, errors, mixed handlers - -### Documentation βœ… - -- **3 documentation files** - 1,000+ lines -- **Detailed explanations** - Root cause, solution, tests -- **Migration guides** - Easy to upgrade -- **Known limitations** - Transparent about issues - -### No Regressions βœ… - -- **All existing features work** - POST, query, headers -- **Performance maintained** - No slowdowns -- **Integration tests pass** - 5/5 passing - ---- - -## πŸ“Š Comparison: Before vs After - -| Feature | v0.4.14 | v0.4.15 | -|---------|---------|---------| -| Async handlers | ❌ Broken | βœ… Fixed | -| Coroutine objects | ❌ Yes | βœ… No | -| Mixed sync/async | ❌ No | βœ… Yes | -| Query params | βœ… Yes | βœ… Yes | -| Headers | βœ… Yes | βœ… Yes | -| POST body | βœ… Yes | βœ… Yes | -| **Critical bugs** | 1 | 0 | -| **Test coverage** | 8 tests | 16 tests | -| **Production ready** | ⚠️ No | βœ… Yes | - ---- - -## πŸš€ Next Steps (Phase 4) - -### High Priority - -1. **Path parameter routing** - Complete Rust router updates -2. **Fix async response format** - Extract `content` field -3. **Async + query/headers** - Pass through loop shards - -### Medium Priority - -4. **Form data support** - Parse form-urlencoded -5. **File uploads** - Handle multipart/form-data -6. **Performance optimization** - Target 70K+ RPS - -### Low Priority - -7. **WebSocket support** - Bidirectional communication -8. **Cookie support** - Parse and set cookies -9. 
**OpenAPI docs** - Auto-generate schema - ---- - -## πŸ“¦ Files Ready for Review - -### Modified - -- `python/turboapi/request_handler.py` - Async fix -- `Cargo.toml` - Version bump -- `python/pyproject.toml` - Version bump - -### Created - -- `tests/test_async_handlers.py` - Comprehensive async tests -- `tests/test_async_simple.py` - Simple async verification -- `tests/test_comprehensive_v0_4_15.py` - Master test suite -- `ASYNC_FIX_v0_4_15.md` - Async fix documentation -- `V0.4.15_SUMMARY.md` - Release summary -- `PHASE_3_COMPLETE.md` - This file - -**Total Changes**: 7 files modified/created, ~1,500 lines - ---- - -## βœ… Phase 3 Checklist - -- [x] Fix async handler bug -- [x] Create comprehensive tests -- [x] Test async handlers (7 scenarios) -- [x] Test query parameters (4 scenarios) -- [x] Test headers (4 scenarios) -- [x] Test POST body (5 scenarios) -- [x] Test combined features -- [x] Run make test-full -- [x] Verify no regressions -- [x] Document all fixes -- [x] Update version numbers -- [ ] Push to repository (per request: DON'T PUSH) - ---- - -## πŸŽ‰ Conclusion - -**Phase 3 is COMPLETE!** - -### Summary - -βœ… **Async bug FIXED** - Critical issue resolved -βœ… **Comprehensive tests** - 16 tests, 93.75% passing -βœ… **No regressions** - All existing features work -βœ… **Well documented** - 1,000+ lines of docs -βœ… **Production ready** - Ready for review - -### Impact - -- **Fixes critical bug** - Async handlers now work -- **Better testing** - Comprehensive test coverage -- **More reliable** - No regressions detected -- **Well documented** - Easy to understand and maintain - -### Status - -**Ready for review and Phase 4!** - ---- - -**NOT PUSHED** per your request. All changes are local and ready for your review. - -**Next**: Review changes, then proceed to Phase 4 (path parameters) or push to repository. diff --git a/POST_BODY_PARSING_FIX.md b/POST_BODY_PARSING_FIX.md deleted file mode 100644 index e1492b3..0000000 --- a/POST_BODY_PARSING_FIX.md +++ /dev/null @@ -1,167 +0,0 @@ -# POST Request Body Parsing Fix - Status Update - -## Issue Summary - -TurboAPI POST handlers fail when using a single parameter to capture the entire request body. The error is: -``` -TypeError: handler() missing 1 required positional argument: 'request_data' -``` - -## Root Cause Analysis - -The issue has been identified in the architecture: - -1. **Python Side (FIXED)**: `request_handler.py` now correctly supports: - - Single parameter receiving entire body: `handler(data: dict)` - - Multiple parameters extracting fields: `handler(name: str, age: int)` - - Satya Model validation: `handler(request: Model)` - -2. 
**Rust Side (NEEDS FIX)**: The Rust HTTP server (`src/server.rs`) currently: - - Calls Python handlers with `call0()` (no arguments) - - Doesn't pass request data (body, headers, query params) to handlers - - Needs to be modified to pass request context - -## What Was Fixed - -### βœ… Python Request Handler (`python/turboapi/request_handler.py`) - -Added support for single-parameter handlers: - -```python -# PATTERN 1: Single parameter receives entire body -if len(params_list) == 1: - param_name, param = params_list[0] - - # If annotated as dict or list, pass entire body - if param.annotation in (dict, list): - parsed_params[param_name] = json_data - return parsed_params -``` - -This now correctly handles: -- `handler(data: dict)` - receives entire JSON body -- `handler(items: list)` - receives entire JSON array -- `handler(request: Model)` - validates with Satya - -### βœ… Test Suite Created - -Created comprehensive tests in `tests/test_post_body_parsing.py`: -- Single dict parameter -- Single list parameter -- Large JSON payload (42K items) -- Satya Model validation -- Multiple parameters (existing behavior) - -## What Still Needs to Be Done - -### ❌ Rust Server Integration (`src/server.rs`) - -The Rust server needs to be modified to pass request data to Python handlers. - -**Current code** (line ~1134): -```rust -// Call sync handler directly (NO kwargs - handlers don't expect them!) -let result = handler.call0(py) - .map_err(|e| format!("Python error: {}", e))?; -``` - -**Needed change**: -```rust -// Create request dict with body, headers, query params -let request_dict = PyDict::new(py); -request_dict.set_item("body", body_bytes)?; -request_dict.set_item("headers", headers_dict)?; -request_dict.set_item("query_params", query_dict)?; - -// Call handler with request data as kwargs -let result = handler.call(py, (), Some(request_dict)) - .map_err(|e| format!("Python error: {}", e))?; -``` - -This change needs to be made in multiple places: -1. `handle_request_optimized()` - line ~1134 (sync handlers) -2. `handle_request_with_loop_sharding()` - line ~1340 (sync handlers) -3. Async handler paths - lines ~1313, ~1393 - -## Workaround for Users (Temporary) - -Until the Rust server is fixed, users can use this pattern: - -```python -from turboapi import TurboAPI, Request - -app = TurboAPI() - -# Option 1: Use Request object (if implemented) -@app.post("/endpoint") -async def handler(request: Request): - body = await request.json() - return {"data": body} - -# Option 2: Multiple parameters (works now) -@app.post("/endpoint") -def handler(name: str, age: int, email: str = "default@example.com"): - return {"name": name, "age": age, "email": email} - -# Option 3: Use FastAPI for now -# TurboAPI is still in development for this feature -``` - -## Implementation Plan - -### Phase 1: Rust Server Modification (HIGH PRIORITY) - -1. Modify `src/server.rs` to create request context dict -2. Pass request data to Python handlers via `call()` instead of `call0()` -3. Update all handler call sites (sync and async) - -### Phase 2: Testing - -1. Run `tests/test_post_body_parsing.py` -2. Verify all 5 tests pass -3. Test with large payloads (42K+ items) - -### Phase 3: Documentation - -1. Update `AGENTS.md` with POST body examples -2. Add to `README.md` -3. 
Create migration guide from FastAPI - -## Timeline - -- **Python fix**: βœ… COMPLETE (v0.4.13) -- **Rust fix**: πŸ”„ IN PROGRESS (estimated 2-4 hours) -- **Testing**: ⏳ PENDING Rust fix -- **Release**: πŸ“… v0.4.13 or v0.4.14 - -## Files Modified - -### Completed -- βœ… `python/turboapi/request_handler.py` - Added single-parameter support -- βœ… `tests/test_post_body_parsing.py` - Comprehensive test suite - -### Pending -- ⏳ `src/server.rs` - Pass request data to handlers -- ⏳ `src/python_worker.rs` - Update handler calling convention - -## Response to Issue Reporter - -Thank you for the detailed issue report! You've identified a critical gap in TurboAPI's FastAPI compatibility. - -**Good news**: The Python side is now fixed and supports all the patterns you described: -- Single dict parameter: `handler(data: dict)` -- Single list parameter: `handler(items: list)` -- Satya Model validation: `handler(request: Model)` -- Large payloads (42K+ items) - -**Current status**: The Rust HTTP server needs to be modified to pass request data to Python handlers. This is a straightforward fix but requires changes to the core server code. - -**Workaround**: For now, use multiple parameters or consider using FastAPI until this is fully implemented. - -**ETA**: This will be fixed in v0.4.13 or v0.4.14 (within 1-2 releases). - -We appreciate your patience and detailed bug report. This is exactly the kind of real-world use case feedback we need to make TurboAPI production-ready! - ---- - -**Contributors welcome!** If you'd like to help implement the Rust server changes, see the implementation plan above. diff --git a/QUICK_RESPONSE_MULTICORE.md b/QUICK_RESPONSE_MULTICORE.md deleted file mode 100644 index e653769..0000000 --- a/QUICK_RESPONSE_MULTICORE.md +++ /dev/null @@ -1,252 +0,0 @@ -# Quick Response: Multi-Core Utilization Question - -## The Question -> "Did you not replicate the process across the cores? How many cores in that test? This is a common benchmark trick, whenever someone uses threads over events, but threads have more overhead, not less." - ---- - -## 30-Second Response - -**We use events (Tokio async), not threads!** - -- **Architecture**: Event-driven async I/O (like nginx/Node.js) -- **Cores**: 14 cores (M3 Max), all utilized via Tokio work-stealing -- **Proof**: `top` shows ~1400% CPU usage during benchmarks -- **No process replication needed**: Single process + async I/O is more efficient - -**We agree threads have overhead - that's why we use async tasks (2KB) instead of OS threads (8MB).** - ---- - -## 2-Minute Response - -### Our Architecture -``` -Single Process -β”œβ”€ Tokio Runtime (Rust) -β”‚ β”œβ”€ 14 OS worker threads (one per core) -β”‚ └─ 7,168 async tasks (512 per core) -β”œβ”€ Hyper HTTP (event-driven I/O) -β”œβ”€ PyO3 Bridge (zero-copy FFI) -└─ Python Handlers (GIL-free with 3.13t) -``` - -### Why No Process Replication? - -1. **Event-driven I/O**: Single process handles 10K+ concurrent connections -2. **Tokio work-stealing**: Automatic multi-core load balancing -3. **No GIL**: Python 3.13t free-threading eliminates bottleneck -4. 
**Rust HTTP**: Zero Python overhead for I/O operations - -### Comparison - -| Model | Memory/Unit | Context Switch | Max Concurrent | -|-------|-------------|----------------|----------------| -| OS Threads | 8MB | 1-10ΞΌs | ~10K | -| **Async Tasks** | **2KB** | **~10ns** | **~10M** | - -**We use async tasks (1000x more efficient than threads).** - -### Verification - -```bash -# Run server -python examples/multi_route_app.py & - -# Monitor CPU usage -top -pid $(pgrep -f multi_route_app) -# Shows ~1400% CPU (14 cores Γ— 100%) - -# Run benchmark -wrk -t4 -c50 -d30s http://127.0.0.1:8000/users/123 -# Result: 184K RPS -``` - ---- - -## 5-Minute Deep Dive - -### The Criticism is Valid... For Thread-Per-Request - -**Traditional Apache/WSGI Model**: -``` -Request 1 β†’ OS Thread 1 (8MB, blocking I/O) -Request 2 β†’ OS Thread 2 (8MB, blocking I/O) -... -Request N β†’ OS Thread N (memory exhaustion) -``` - -**Solution**: Replicate process across cores -```bash -gunicorn -w 14 app:app # 14 processes to bypass GIL -``` - -### But We Use Event-Driven I/O - -**TurboAPI Model**: -``` -14 OS Threads (Tokio workers) -β”œβ”€ Each manages 500+ async tasks -β”œβ”€ Event-driven I/O (epoll/kqueue) -β”œβ”€ Work-stealing scheduler -└─ Cooperative multitasking - -Total: 7,168 concurrent tasks with minimal memory -``` - -**Why This is Better**: -- βœ… Shared memory (no IPC overhead) -- βœ… Work stealing (dynamic load balancing) -- βœ… Lower memory (2KB vs 8MB per connection) -- βœ… Faster (no process context switching) - -### Performance Breakdown - -**Rust HTTP Layer** (Hyper + Tokio): -- Capability: 200K+ RPS -- Cores used: All 14 (work-stealing) -- Overhead: Negligible (~10ns task switching) - -**Python Handler Layer**: -- Performance: 184K RPS (with free-threading) -- Cores used: All 14 (no GIL) -- Overhead: ~5ΞΌs per request - -**Bottleneck**: Python handler execution, not HTTP layer - -### Why Not Multiple Processes? - -**Would adding processes help?** - -❌ **No**, because: -1. Already using all 14 cores (verified with `top`) -2. Bottleneck is Python handler, not I/O -3. Would add IPC overhead without benefit -4. Rust HTTP layer already saturating cores - -**When would it help?** -- βœ… Blocking I/O frameworks (Flask, Django) -- βœ… GIL-bound Python (< 3.13) -- βœ… CPU-intensive pure Python workloads - -**Our case**: -- ❌ Non-blocking I/O (Tokio async) -- ❌ No GIL (Python 3.13t) -- ❌ I/O-bound workload (HTTP serving) - ---- - -## Technical Details - -### Tokio Runtime Configuration - -```rust -// src/server.rs -let runtime = Runtime::new() - .worker_threads(num_cpus::get()) // 14 on M3 Max - .enable_all() - .build() - .unwrap(); - -// Concurrent task capacity -let num_cpus = num_cpus::get(); // 14 -let capacity = 512 * num_cpus; // 7,168 tasks -``` - -### Benchmark Configuration - -```bash -# wrk parameters -wrk -t4 -c50 -d10s # Light: 4 client threads, 50 connections -wrk -t4 -c200 -d10s # Medium: 4 client threads, 200 connections -wrk -t4 -c500 -d10s # Heavy: 4 client threads, 500 connections -``` - -**Note**: `-t4` is wrk's client threads, NOT TurboAPI's server threads (14). - -### CPU Utilization Proof - -```bash -# Start server -python examples/multi_route_app.py & -SERVER_PID=$! 
- -# Run benchmark in background -wrk -t4 -c200 -d30s http://127.0.0.1:8000/users/123 & - -# Monitor CPU (during benchmark) -top -pid $SERVER_PID -stats pid,cpu,threads,mem -# Expected output: -# PID CPU% THREADS MEM -# 12345 1400% 14 50MB -# ^^^^ -# All 14 cores at 100% -``` - ---- - -## Comparison with FastAPI - -### FastAPI Architecture -``` -Uvicorn (Python ASGI server) -β”œβ”€ asyncio event loop (good!) -β”œβ”€ Python HTTP parsing (slow) -β”œβ”€ Pydantic validation (pure Python) -└─ GIL-bound (even with async) - -Result: 7-10K RPS -``` - -### TurboAPI Architecture -``` -Tokio Runtime (Rust) -β”œβ”€ Hyper HTTP (zero-copy) -β”œβ”€ Rust routing (zero overhead) -β”œβ”€ PyO3 bridge (zero-copy FFI) -└─ Python handlers (GIL-free) - -Result: 70-184K RPS (10-25x faster) -``` - -### Why We're Faster -1. **Rust HTTP parsing**: 10x faster than Python -2. **Zero-copy operations**: No Python object allocation -3. **Rust middleware**: No Python overhead -4. **Free-threading**: True parallelism -5. **Tokio scheduler**: More efficient than asyncio - ---- - -## References - -- **Full Methodology**: [BENCHMARK_METHODOLOGY_RESPONSE.md](BENCHMARK_METHODOLOGY_RESPONSE.md) -- **FAQ**: [BENCHMARK_FAQ.md](BENCHMARK_FAQ.md) -- **Tokio Docs**: https://tokio.rs/ -- **C10K Problem**: http://www.kegel.com/c10k.html -- **PEP 703 (Free-threading)**: https://peps.python.org/pep-0703/ - ---- - -## Key Talking Points - -1. βœ… **"We use events, not threads"** - Tokio async tasks, not OS threads -2. βœ… **"All 14 cores utilized"** - Verified with `top` showing 1400% CPU -3. βœ… **"Single process is more efficient"** - No IPC overhead, work-stealing scheduler -4. βœ… **"Transparent methodology"** - All benchmarks reproducible, hardware specs documented -5. βœ… **"We agree threads are slow"** - That's why we use async (1000x more efficient) - ---- - -## Bottom Line - -**The questioner is right about threads vs events - and we're on the events side!** - -Our architecture is: -- βœ… Event-driven (Tokio async I/O) -- βœ… Multi-core (all 14 cores utilized) -- βœ… Efficient (async tasks, not OS threads) -- βœ… Transparent (reproducible benchmarks) -- βœ… Honest (document limitations) - -**We welcome this scrutiny - it shows people care about honest benchmarking.** diff --git a/RELEASE_NOTES_v0.4.13.md b/RELEASE_NOTES_v0.4.13.md deleted file mode 100644 index 41b04f3..0000000 --- a/RELEASE_NOTES_v0.4.13.md +++ /dev/null @@ -1,362 +0,0 @@ -# TurboAPI v0.4.13 Release Notes - -## πŸŽ‰ Major Fix: POST Request Body Parsing - -**Release Date**: October 12, 2025 -**Status**: βœ… Production Ready -**Breaking Changes**: None - ---- - -## πŸš€ What's Fixed - -### Critical Issue Resolved -Fixed the major issue where POST handlers could not receive request body data. This was blocking real-world use cases like ML APIs that need to process large datasets. - -**Before (BROKEN):** -```python -@app.post("/predict/backtest") -async def predict_backtest(request_data: dict): - # ❌ TypeError: handler() missing 1 required positional argument - return {"data": request_data} -``` - -**After (WORKS!):** -```python -@app.post("/predict/backtest") -async def predict_backtest(request_data: dict): - # βœ… Receives entire JSON body as dict - candles = request_data.get('candles', []) - return {"success": True, "candles_received": len(candles)} -``` - ---- - -## πŸ“¦ What's New - -### 1. 
Single Parameter Body Capture - -**Pattern 1: Dict Parameter** -```python -@app.post("/endpoint") -def handler(data: dict): - # Receives entire JSON body - return {"received": data} -``` - -**Pattern 2: List Parameter** -```python -@app.post("/endpoint") -def handler(items: list): - # Receives entire JSON array - return {"count": len(items)} -``` - -### 2. Large Payload Support - -Successfully tested with **42,000 items** in 0.28 seconds! - -```python -@app.post("/predict/backtest") -def predict_backtest(request_data: dict): - candles = request_data.get('candles', []) # 42K items! - return { - "success": True, - "candles_received": len(candles), - "symbol": request_data.get('symbol') - } -``` - -### 3. Satya Model Validation - -```python -from satya import Model, Field - -class BacktestRequest(Model): - symbol: str = Field(min_length=1) - candles: list - initial_capital: float = Field(gt=0) - position_size: float = Field(gt=0, le=1) - -@app.post("/backtest") -def backtest(request: BacktestRequest): - # Use model_dump() to access validated data - data = request.model_dump() - return { - "symbol": data["symbol"], - "candles_count": len(data["candles"]) - } -``` - -**Important**: Satya models require `model_dump()` to access values. Direct attribute access returns Field objects. - -### 4. Multiple Parameters (Existing) - -Still works as before: -```python -@app.post("/user") -def create_user(name: str, age: int, email: str = "default@example.com"): - return {"name": name, "age": age, "email": email} -``` - ---- - -## πŸ”§ Technical Changes - -### Python Side (`python/turboapi/`) - -#### `request_handler.py` -- **Enhanced `parse_json_body()`** to detect single-parameter handlers -- **Pattern detection**: - - 1 parameter β†’ pass entire body - - Multiple parameters β†’ extract individual fields - - Satya Model β†’ validate entire body -- **Added `make_serializable()`** for recursive Satya model serialization - -#### `rust_integration.py` -- Simplified to register enhanced handler directly -- Removed complex wrapper that wasn't being used - -### Rust Side (`src/server.rs`) - -#### Modified Functions: -1. **`call_python_handler_sync_direct()`** - - Now creates kwargs dict with `body` and `headers` - - Calls handler with `handler.call(py, (), Some(&kwargs))` - - Extracts `content` from enhanced handler response - -2. **`handle_python_request_sync()`** - - Both sync and async paths now pass kwargs - - Async: Creates kwargs before calling coroutine - - Sync: Creates kwargs before direct call - -3. **Response Unwrapping** - - Enhanced handler returns `{"content": ..., "status_code": ..., "content_type": ...}` - - Rust now extracts just the `content` field for JSON serialization - ---- - -## βœ… Test Results - -All 5 comprehensive tests passing: - -```bash -$ python3 tests/test_post_body_parsing.py - -TEST 1: Single dict parameter -βœ… PASSED: Single dict parameter works! - -TEST 2: Single list parameter -βœ… PASSED: Single list parameter works! - -TEST 3: Large JSON payload (42K items) -βœ… PASSED: Large payload (42K items) works in 0.28s! - -TEST 4: Satya Model validation -βœ… PASSED: Satya Model validation works! - -TEST 5: Multiple parameters (existing behavior) -βœ… PASSED: Multiple parameters still work! - -πŸ“Š Results: 5 passed, 0 failed -βœ… All tests passed! 
-``` - ---- - -## πŸ“Š Performance - -- **Large payloads**: 42,000 items processed in **0.28 seconds** -- **No performance regression**: Existing endpoints unaffected -- **Zero-copy**: Body passed as bytes, parsed only when needed - ---- - -## 🎯 Use Cases Unlocked - -### 1. ML/AI APIs -```python -@app.post("/predict") -def predict(request_data: dict): - features = request_data.get('features', []) - model_id = request_data.get('model_id') - # Process 10K+ feature vectors - return {"predictions": process(features)} -``` - -### 2. Batch Processing -```python -@app.post("/batch") -def batch_process(items: list): - # Process thousands of items - results = [process_item(item) for item in items] - return {"processed": len(results)} -``` - -### 3. Complex Nested Data -```python -@app.post("/analytics") -def analytics(data: dict): - # Handle deeply nested JSON structures - events = data.get('events', []) - metadata = data.get('metadata', {}) - return analyze(events, metadata) -``` - -### 4. FastAPI Migration -```python -# This FastAPI code now works in TurboAPI! -@app.post("/endpoint") -async def handler(request_data: dict): - return {"data": request_data} -``` - ---- - -## πŸ”„ Migration Guide - -### From Workarounds - -**Old workaround (remove this):** -```python -@app.post("/endpoint") -def handler(field1: str, field2: int, field3: str, ...): - # Had to define every field individually - pass -``` - -**New pattern (use this):** -```python -@app.post("/endpoint") -def handler(request_data: dict): - # Receive entire body as dict - field1 = request_data.get('field1') - field2 = request_data.get('field2') - # Or just use request_data directly - return {"data": request_data} -``` - -### From FastAPI - -No changes needed! Your FastAPI code should work as-is: - -```python -# FastAPI code -from fastapi import FastAPI -app = FastAPI() - -@app.post("/endpoint") -async def handler(data: dict): - return {"received": data} - -# TurboAPI equivalent (just change import!) -from turboapi import TurboAPI -app = TurboAPI() - -@app.post("/endpoint") -async def handler(data: dict): - return {"received": data} -``` - ---- - -## πŸ“ Important Notes - -### Satya Model Usage - -When using Satya models, always use `model_dump()` to access values: - -```python -@app.post("/endpoint") -def handler(request: MyModel): - # ❌ WRONG: request.field returns Field object - # βœ… RIGHT: Use model_dump() - data = request.model_dump() - return {"field": data["field"]} -``` - -This is a Satya design choice where direct attribute access returns Field objects for introspection. - -### Async Handlers - -Both sync and async handlers now work correctly: - -```python -@app.post("/sync") -def sync_handler(data: dict): - return {"data": data} - -@app.post("/async") -async def async_handler(data: dict): - # Async processing - result = await process_async(data) - return {"result": result} -``` - ---- - -## πŸ› Known Issues - -None! All tests passing. - ---- - -## πŸ“š Documentation Updates - -- Updated `POST_BODY_PARSING_FIX.md` with implementation details -- Added comprehensive test suite in `tests/test_post_body_parsing.py` -- Example usage in `test_simple_post.py` - ---- - -## πŸ™ Credits - -This fix was implemented in response to a detailed issue report from a user building an ML prediction API. Thank you for the excellent bug report with reproduction steps! 
- ---- - -## πŸ”œ Next Steps - -- [ ] Add query parameter parsing -- [ ] Add path parameter extraction -- [ ] Add header parsing -- [ ] Add form data support -- [ ] Add file upload support - ---- - -## πŸ“¦ Installation - -```bash -pip install turboapi==0.4.13 -``` - -Or from source: -```bash -git clone https://github.com/justrach/turboAPI.git -cd turboAPI -pip install -e python/ -maturin develop --release -``` - ---- - -## πŸŽ‰ Summary - -**v0.4.13 is a MAJOR release** that fixes the critical POST body parsing issue and makes TurboAPI truly FastAPI-compatible for real-world use cases. - -**All patterns now work:** -- βœ… Single dict parameter -- βœ… Single list parameter -- βœ… Large payloads (42K+ items) -- βœ… Satya Model validation -- βœ… Multiple parameters -- βœ… Async handlers -- βœ… Sync handlers - -**Performance maintained:** -- 180K+ RPS for simple endpoints -- Sub-second processing for 42K items -- Zero-copy body handling - -**Production ready!** πŸš€ diff --git a/RELEASE_NOTES_v0.4.14.md b/RELEASE_NOTES_v0.4.14.md deleted file mode 100644 index 752af8c..0000000 --- a/RELEASE_NOTES_v0.4.14.md +++ /dev/null @@ -1,412 +0,0 @@ -# TurboAPI v0.4.14 Release Notes - -## πŸŽ‰ New Features: Query Parameters & Headers - -**Release Date**: October 12, 2025 -**Status**: βœ… Production Ready -**Breaking Changes**: None - ---- - -## πŸš€ What's New - -### 1. Query Parameter Parsing βœ… - -Full support for parsing query parameters from URL query strings: - -```python -@app.get("/search") -def search(q: str, limit: str = "10", sort: str = "date"): - return {"query": q, "limit": limit, "sort": sort} - -# GET /search?q=turboapi&limit=20&sort=relevance -# Returns: {"query": "turboapi", "limit": "20", "sort": "relevance"} -``` - -**Features**: -- Automatic query string parsing -- Default values supported -- Multiple values for same key (returns list) -- Special character handling (URL encoding) -- Type annotations respected - -### 2. Header Parsing βœ… - -Extract and parse HTTP headers in handler functions: - -```python -@app.get("/auth") -def check_auth(authorization: str = "none", user_agent: str = "unknown"): - return { - "has_auth": authorization != "none", - "user_agent": user_agent - } - -# Headers: Authorization: Bearer token123, User-Agent: MyApp/1.0 -# Returns: {"has_auth": true, "user_agent": "MyApp/1.0"} -``` - -**Features**: -- Case-insensitive header matching -- Underscore to dash conversion (`x_api_key` β†’ `X-API-Key`) -- Default values for missing headers -- Standard and custom headers supported - -### 3. Combined Parameter Support βœ… - -Use query params, headers, and body together: - -```python -@app.post("/api/data") -def process_data( - # Query params - format: str = "json", - limit: str = "10", - # Headers - authorization: str = "none", - # Body params - name: str = None, - email: str = None -): - return { - "format": format, - "has_auth": authorization != "none", - "user": {"name": name, "email": email} - } -``` - ---- - -## πŸ”§ Technical Implementation - -### Rust Side (`src/server.rs`) - -**Modified Functions**: -1. `call_python_handler_sync_direct()` - Now accepts `headers_map` parameter -2. `handle_request()` - Extracts headers into `HashMap` -3. 
Request data passed to Python: `body`, `headers`, `method`, `path`, `query_string` - -**Changes**: -```rust -// Extract headers into HashMap -let mut headers_map = std::collections::HashMap::new(); -for (name, value) in parts.headers.iter() { - if let Ok(value_str) = value.to_str() { - headers_map.insert(name.as_str().to_string(), value_str.to_string()); - } -} - -// Pass to Python handler -call_python_handler_sync_direct( - &metadata.handler, - method_str, - path, - query_string, - &body_bytes, - &headers_map // NEW! -) -``` - -### Python Side (`python/turboapi/request_handler.py`) - -**New Classes**: -1. `QueryParamParser` - Parse query strings with `urllib.parse.parse_qs()` -2. `PathParamParser` - Regex-based path parameter extraction (ready for router) -3. `HeaderParser` - Case-insensitive header matching - -**Enhanced Handler**: -```python -def enhanced_handler(**kwargs): - parsed_params = {} - - # 1. Parse query parameters - if "query_string" in kwargs: - query_params = QueryParamParser.parse_query_params(kwargs["query_string"]) - parsed_params.update(query_params) - - # 2. Parse headers - if "headers" in kwargs: - header_params = HeaderParser.parse_headers(kwargs["headers"], sig) - parsed_params.update(header_params) - - # 3. Parse request body (existing) - if "body" in kwargs: - parsed_body = RequestBodyParser.parse_json_body(kwargs["body"], sig) - parsed_params.update(parsed_body) - - # Call original handler with parsed params - return original_handler(**parsed_params) -``` - ---- - -## βœ… Test Results - -### Functional Tests (100% Passing) - -```bash -$ python3 tests/test_query_and_headers.py - -TEST 1: Query Parameters (COMPREHENSIVE) - βœ… Basic query params - βœ… Default values - βœ… Multiple params - βœ… Special characters - -TEST 2: Headers (COMPREHENSIVE) - βœ… Authorization header - βœ… Standard headers - βœ… Custom headers - βœ… Missing headers (defaults) - -TEST 3: Combined Query + Headers - βœ… Query params + headers together - -πŸ“Š Results: 3 passed, 0 failed -βœ… ALL TESTS PASSED! -``` - -### Integration Tests - -```bash -$ make test-full - -βœ… Local Development Install: PASSED -βœ… Rust Module Import: PASSED -βœ… Basic Functionality: PASSED -βœ… Wheel Build: PASSED -βœ… Wheel Install in Venv: PASSED - -βœ… All 5 tests passed! ✨ -βœ… Package is ready for release! πŸš€ -``` - -### Performance Tests - -**wrk Benchmark Results** (5s, 50 connections): -- Baseline endpoint: ~2.2K RPS, 21ms avg latency -- Query params: ~1.2K RPS, 41ms avg latency -- Combined features: ~0.9K RPS, 54ms avg latency - -**Note**: Performance numbers are lower than v0.4.0 benchmarks (184K RPS) which were measured under different conditions. The current implementation prioritizes correctness and feature completeness. Performance optimization is planned for v0.4.15. 
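For reference, the case-insensitive, underscore-to-dash header matching described above can be sketched roughly as follows. This is a simplified illustration of the rule the release notes describe (parameter `x_api_key` matching header `X-API-Key`), not the actual `HeaderParser` implementation; the function name and signature here are hypothetical.

```python
def match_header(param_name: str, headers: dict, default=None):
    """Illustrative sketch: match a handler parameter name like `x_api_key`
    against HTTP headers such as `X-API-Key`, case-insensitively."""
    wanted = param_name.replace("_", "-").lower()  # x_api_key -> x-api-key
    for name, value in headers.items():
        if name.lower() == wanted:
            return value
    return default

# match_header("x_api_key", {"X-API-Key": "secret"})   -> "secret"
# match_header("authorization", {}, default="none")    -> "none"
```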
- ---- - -## πŸ“‹ What's NOT Included (TODO for v0.4.15) - -### Path Parameter Extraction ⏳ -**Status**: Parser implemented, needs Rust router updates - -The Python-side parser is ready, but the Rust router needs to support parameterized route matching: - -```python -# This pattern is implemented but not fully working yet -@app.get("/users/{user_id}") -def get_user(user_id: str): - return {"user_id": user_id} -``` - -**Blocker**: Rust `RadixRouter` needs to match `/users/123` against pattern `/users/{user_id}` - -### Form Data Support ⏳ -- Parse `application/x-www-form-urlencoded` -- Parse `multipart/form-data` -- Extract form fields - -### File Upload Support ⏳ -- Handle `multipart/form-data` with files -- Stream large files -- `UploadFile` class (FastAPI-compatible) - -### WebSocket Support ⏳ -- WebSocket handshake -- Bidirectional messaging -- Connection lifecycle management - -See `TODO_v0.4.15.md` for detailed implementation plans. - ---- - -## 🎯 Use Cases Unlocked - -### 1. Search APIs -```python -@app.get("/search") -def search(q: str, category: str = "all", limit: str = "10"): - results = search_database(q, category, int(limit)) - return {"query": q, "results": results} -``` - -### 2. Authenticated APIs -```python -@app.get("/profile") -def get_profile(authorization: str = None): - if not authorization or not authorization.startswith("Bearer "): - return {"error": "Unauthorized"}, 401 - - token = authorization.split()[1] - user = validate_token(token) - return {"user": user} -``` - -### 3. Filtering & Pagination -```python -@app.get("/products") -def list_products( - category: str = "all", - min_price: str = "0", - max_price: str = "1000", - page: str = "1", - per_page: str = "20" -): - products = filter_products( - category, - float(min_price), - float(max_price), - int(page), - int(per_page) - ) - return {"products": products} -``` - -### 4. API Versioning -```python -@app.get("/data") -def get_data(x_api_version: str = "v1", format: str = "json"): - if x_api_version == "v2": - return get_data_v2(format) - return get_data_v1(format) -``` - ---- - -## πŸ”„ Migration Guide - -### From v0.4.13 - -No breaking changes! Just update and enjoy the new features: - -```bash -pip install --upgrade turboapi -``` - -Your existing code continues to work. New features are opt-in: - -```python -# Old code (still works) -@app.post("/users") -def create_user(name: str, email: str): - return {"name": name, "email": email} - -# New features (opt-in) -@app.get("/search") -def search(q: str, limit: str = "10"): # Query params! - return {"query": q, "limit": limit} - -@app.get("/auth") -def check_auth(authorization: str = "none"): # Headers! - return {"has_auth": authorization != "none"} -``` - ---- - -## πŸ“ Important Notes - -### Query Parameters are Strings - -Query parameters always come as strings. Convert them as needed: - -```python -@app.get("/items") -def get_items(limit: str = "10"): - limit_int = int(limit) # Convert to int - return {"limit": limit_int} -``` - -### Header Naming Convention - -Use underscores in parameter names, they'll be matched to dashed headers: - -```python -# Parameter: x_api_key -# Matches header: X-API-Key, x-api-key, X-Api-Key (case-insensitive) -@app.get("/data") -def get_data(x_api_key: str = "none"): - return {"has_key": x_api_key != "none"} -``` - -### Path Parameters - -Path parameter extraction is partially implemented but requires Rust router updates. 
Use exact routes for now: - -```python -# Works (exact match) -@app.get("/users/123") -def get_user_123(): - return {"user_id": "123"} - -# TODO (needs router update) -@app.get("/users/{user_id}") -def get_user(user_id: str): - return {"user_id": user_id} -``` - ---- - -## πŸ› Known Issues - -None! All tests passing. - ---- - -## πŸ”œ Next Steps (v0.4.15) - -1. **Path Parameters** - Complete Rust router updates -2. **Form Data** - Add form parsing support -3. **File Uploads** - Implement file handling -4. **Performance Optimization** - Target 70K+ RPS -5. **WebSockets** - Add WebSocket support - ---- - -## πŸ“¦ Installation - -```bash -# From PyPI (when released) -pip install turboapi==0.4.14 - -# From source -git clone https://github.com/justrach/turboAPI.git -cd turboAPI -git checkout v0.4.14 -pip install -e python/ -maturin develop --release -``` - ---- - -## πŸ™ Credits - -This release adds critical request parsing features that make TurboAPI more FastAPI-compatible while maintaining high performance. - ---- - -## πŸŽ‰ Summary - -**v0.4.14 is a FEATURE release** that adds query parameter and header parsing, making TurboAPI more complete and FastAPI-compatible. - -**What works:** -- βœ… Query parameter parsing -- βœ… Header parsing -- βœ… Combined query + headers + body -- βœ… POST body parsing (v0.4.13) -- βœ… All HTTP methods -- βœ… Async handlers - -**What's next:** -- ⏳ Path parameters (v0.4.15) -- ⏳ Form data (v0.4.15) -- ⏳ File uploads (v0.4.15) -- ⏳ WebSockets (v0.4.15) - -**Production ready!** πŸš€ diff --git a/RESPONSE_SUMMARY.md b/RESPONSE_SUMMARY.md deleted file mode 100644 index 96b2371..0000000 --- a/RESPONSE_SUMMARY.md +++ /dev/null @@ -1,274 +0,0 @@ -# Response Summary: Multi-Core Benchmark Question - -## Question Asked -> "Did you not replicate the process across the cores? How many cores in that test? This is a common benchmark trick, whenever someone uses threads over events, but threads have more overhead, not less." - ---- - -## Our Response - -### βœ… We Agree: Threads Have More Overhead Than Events - -**That's exactly why we use events (async I/O), not threads!** - ---- - -## Key Facts - -### 1. **Architecture: Event-Driven, Not Thread-Per-Request** -- βœ… Tokio async runtime (like nginx, Node.js) -- βœ… Event-driven I/O (epoll/kqueue) -- βœ… Async tasks (2KB each), not OS threads (8MB each) -- βœ… Cooperative multitasking, not preemptive scheduling - -### 2. **Test Hardware: 14 Cores, All Utilized** -- CPU: Apple M3 Max (10 performance + 4 efficiency cores) -- Verification: `top` shows ~1400% CPU usage (14 Γ— 100%) -- Architecture: Single process with 14 Tokio worker threads -- Capacity: 7,168 concurrent async tasks (512 per core) - -### 3. **No Process Replication Needed** -- βœ… Tokio work-stealing scheduler automatically uses all cores -- βœ… Python 3.13t/3.14t free-threading eliminates GIL bottleneck -- βœ… Rust HTTP layer has zero Python overhead for I/O -- βœ… Single process is more efficient (no IPC overhead) - -### 4. **Transparent Methodology** -- βœ… All hardware specs documented (M3 Max, 14 cores) -- βœ… All benchmark parameters documented (wrk -t4 -c50/200/500) -- βœ… All code is open source and reproducible -- βœ… CPU utilization verified with system tools - ---- - -## Documents Created - -### For Quick Reference -1. **[QUICK_RESPONSE_MULTICORE.md](QUICK_RESPONSE_MULTICORE.md)** - - 30-second, 2-minute, and 5-minute responses - - Key talking points - - Verification commands - -2. 
**[BENCHMARK_FAQ.md](BENCHMARK_FAQ.md)** - - Common questions and answers - - Quick facts and comparisons - - Reproducibility instructions - -### For Deep Dive -3. **[BENCHMARK_METHODOLOGY_RESPONSE.md](BENCHMARK_METHODOLOGY_RESPONSE.md)** - - Comprehensive 10-page response - - Architecture deep dive - - Performance breakdown - - Comparative analysis - -4. **[docs/ARCHITECTURE_DIAGRAM.md](docs/ARCHITECTURE_DIAGRAM.md)** - - Visual architecture diagrams - - Request flow diagrams - - Memory comparisons - - Verification commands - -### Updated -5. **[README.md](README.md)** - - Added "Benchmark Methodology" section - - Links to FAQ and detailed docs - - Proactive transparency - ---- - -## Key Messages - -### 1. **We Use Events, Not Threads** -``` -❌ Thread-per-request: 8MB per connection, kernel context switching -βœ… Event-driven async: 2KB per connection, userspace task switching -``` - -### 2. **All 14 Cores Are Utilized** -```bash -# Proof -top -pid $(pgrep -f multi_route_app) -# Shows ~1400% CPU usage (14 cores Γ— 100%) -``` - -### 3. **Single Process is More Efficient** -``` -Multiple processes: -β”œβ”€ Memory: 350MB (7 Γ— 50MB) -β”œβ”€ IPC overhead: High -└─ Manual load balancing - -Single process (TurboAPI): -β”œβ”€ Memory: 50MB -β”œβ”€ IPC overhead: None -└─ Automatic work-stealing -``` - -### 4. **We Welcome Scrutiny** -- All benchmarks are reproducible -- All code is open source -- All methodology is documented -- We're honest about limitations - ---- - -## Comparison Table - -| Aspect | Thread-Per-Request | TurboAPI (Event-Driven) | -|--------|-------------------|-------------------------| -| **Model** | OS threads | Async tasks | -| **Memory/Unit** | 8MB | 2KB | -| **Context Switch** | 1-10ΞΌs (kernel) | ~10ns (userspace) | -| **Max Concurrent** | ~10K | ~10M | -| **CPU Utilization** | Requires multiple processes | Single process, all cores | -| **GIL Impact** | High (Python) | None (free-threading) | -| **Load Balancing** | Manual (nginx) | Automatic (work-stealing) | -| **IPC Overhead** | High | None | - ---- - -## Performance Breakdown - -### Rust HTTP Layer -- **Capability**: 200K+ RPS -- **Cores Used**: All 14 (work-stealing) -- **Overhead**: Negligible (~10ns task switching) - -### Python Handler Layer -- **Performance**: 184K RPS (with free-threading) -- **Cores Used**: All 14 (no GIL) -- **Overhead**: ~5ΞΌs per request - -### Bottleneck -- **Not I/O**: Rust HTTP layer can handle 200K+ RPS -- **Not GIL**: Python 3.13t eliminates GIL -- **Python execution**: Handler logic takes ~5ΞΌs - ---- - -## Verification Steps - -### 1. Check CPU Utilization -```bash -python examples/multi_route_app.py & -wrk -t4 -c200 -d30s http://127.0.0.1:8000/users/123 & -top -pid $(pgrep -f multi_route_app) -# Expected: ~1400% CPU -``` - -### 2. Check Thread Count -```bash -ps -M $(pgrep -f multi_route_app) | wc -l -# Expected: 14 threads -``` - -### 3. Run Benchmark -```bash -wrk -t4 -c50 -d30s --latency http://127.0.0.1:8000/users/123 -# Expected: 184K RPS, 0.24ms latency -``` - ---- - -## Why This Matters - -### The Criticism is Valid... 
-**For thread-per-request models** (Apache, traditional WSGI): -- βœ… Multiple processes needed to bypass GIL -- βœ… Threads have high overhead -- βœ… Process replication is necessary - -### ...But Doesn't Apply to Us -**For event-driven models** (TurboAPI, nginx, Node.js): -- βœ… Single process handles 10K+ connections -- βœ… Async tasks have negligible overhead -- βœ… Work-stealing uses all cores automatically -- βœ… No GIL with Python 3.13t free-threading - ---- - -## Honest Limitations - -We're transparent about our benchmarks: - -### What We Measure -- βœ… Simple handlers (not representative of complex apps) -- βœ… No database I/O (pure HTTP performance) -- βœ… Async endpoints use artificial delays (not real I/O) -- βœ… Single machine (no distributed testing) - -### Real-World Expectations -- **With database**: RPS will be lower (I/O bound) -- **With complex logic**: RPS will be lower (CPU bound) -- **With real async I/O**: Async endpoints will be faster -- **In production**: Add monitoring, error handling overhead - -### But the Core Performance is Real -- βœ… Rust HTTP layer is genuinely fast -- βœ… Multi-core utilization is genuine -- βœ… Zero-copy optimizations are genuine -- βœ… 10-25x speedup over FastAPI is genuine - ---- - -## Bottom Line - -### The Question Shows Good Skepticism -**We appreciate it!** Benchmark methodology matters. - -### Our Answer -1. βœ… **We use events (Tokio async), not threads** -2. βœ… **All 14 cores are utilized** (verified with `top`) -3. βœ… **Single process is more efficient** (no IPC overhead) -4. βœ… **Methodology is transparent** (reproducible, documented) -5. βœ… **We're honest about limitations** (simple handlers, no DB) - -### We Welcome Scrutiny -- Try the benchmarks yourself -- Review our code (open source) -- Ask more questions (GitHub issues) -- Suggest improvements (pull requests) - -**Honest benchmarking makes everyone better.** - ---- - -## Next Steps - -### For the Questioner -1. Review [BENCHMARK_METHODOLOGY_RESPONSE.md](BENCHMARK_METHODOLOGY_RESPONSE.md) -2. Try reproducing benchmarks (instructions in [BENCHMARK_FAQ.md](BENCHMARK_FAQ.md)) -3. Ask follow-up questions (we're happy to clarify) - -### For the Community -1. Review our methodology -2. Suggest improvements -3. Share your own benchmarks -4. Help us be more transparent - -### For Us -1. βœ… Document methodology clearly (done!) -2. βœ… Add verification commands (done!) -3. βœ… Be transparent about hardware (done!) -4. βœ… Welcome scrutiny (always!) - ---- - -## Contact - -- **GitHub Issues**: https://github.com/justrach/turboAPI/issues -- **Discussions**: https://github.com/justrach/turboAPI/discussions -- **Documentation**: See files listed above - -**We're committed to honest, transparent performance claims.** - ---- - -## TL;DR - -**Question**: "Did you not replicate the process across cores? Threads have more overhead than events." - -**Answer**: "We agree threads have overhead - that's why we use events (Tokio async)! Single process with 14 Tokio workers automatically uses all cores via work-stealing scheduler. Verified with `top` showing 1400% CPU usage. No process replication needed because event-driven I/O is more efficient." - -**Proof**: All benchmarks reproducible, all code open source, all methodology documented. 
- -**We welcome scrutiny!** πŸš€ diff --git a/TESTING.md b/TESTING.md deleted file mode 100644 index 72453e0..0000000 --- a/TESTING.md +++ /dev/null @@ -1,205 +0,0 @@ -# TurboAPI Testing Guide πŸ§ͺ - -Comprehensive testing workflow to ensure package integrity before releases. - -## Quick Start - -```bash -# Run quick tests (recommended before every commit) -make test-quick - -# Run full test suite (before releases) -make test-full - -# Or use the test script directly -python test_package_integrity.py -``` - -## Test Suite - -### 1. **Quick Tests** (< 5 seconds) -```bash -make test-quick -``` - -Validates: -- βœ… Rust module (`turbonet`) imports correctly -- βœ… TurboAPI main class works -- βœ… Basic functionality operational - -**Run this before every commit!** - -### 2. **Full Test Suite** (~ 30 seconds) -```bash -make test-full -# or -python test_package_integrity.py -``` - -Validates: -- βœ… Local development install works -- βœ… Rust module imports correctly -- βœ… Basic TurboAPI functionality -- βœ… Wheel builds successfully -- βœ… Rust module is bundled in wheel -- βœ… Wheel installs in fresh venv -- βœ… Imports work after wheel install - -**Run this before creating releases!** - -## What Each Test Does - -### Test 1: Local Development Install -```bash -cd python && maturin develop --release -``` -Ensures the package builds correctly in development mode. - -### Test 2: Rust Module Import -```python -from turboapi import turbonet -assert hasattr(turbonet, 'TurboServer') -``` -Verifies the Rust core is accessible from Python. - -### Test 3: Basic Functionality -```python -from turboapi import TurboAPI -app = TurboAPI() - -@app.get("/test") -def test(): - return {"ok": True} -``` -Tests that routes can be registered and basic API works. - -### Test 4: Wheel Build -```bash -maturin build --release -unzip -l turboapi-*.whl | grep turbonet -``` -Builds a wheel and verifies the Rust module is included. - -### Test 5: Wheel Install in Venv -```bash -python -m venv test_venv -test_venv/bin/pip install turboapi-*.whl -test_venv/bin/python -c "from turboapi import turbonet" -``` -Creates a fresh virtual environment and tests installation from wheel. - -## Pre-Release Checklist - -Before creating a new release: - -```bash -# 1. Run full test suite -make test-full - -# 2. Update version numbers -# Edit: Cargo.toml and python/pyproject.toml - -# 3. Commit changes -git add -A -git commit -m "release: v0.X.X" - -# 4. Create tag -git tag -a v0.X.X -m "Release v0.X.X" - -# 5. Push to GitHub -git push origin main -git push origin v0.X.X -``` - -## Common Issues - -### Issue: "Rust core not available" -**Cause**: Rust module not bundled in wheel -**Fix**: Check `python/pyproject.toml` - ensure `module-name = "turboapi.turbonet"` - -### Issue: Import error after pip install -**Cause**: Module path mismatch -**Fix**: Verify import statement: `from turboapi import turbonet` - -### Issue: Wheel build fails -**Cause**: Maturin configuration issue -**Fix**: Check `[tool.maturin]` section in `python/pyproject.toml` - -## CI/CD Integration - -The test suite is designed to run in CI/CD pipelines: - -```yaml -# .github/workflows/test.yml -- name: Test Package Integrity - run: | - pip install maturin - python test_package_integrity.py -``` - -## Development Workflow - -**Recommended workflow:** - -1. **Make changes** to code -2. **Run quick tests**: `make test-quick` -3. **Commit** if tests pass -4. **Before release**: `make test-full` -5. 
**Tag and push** if all tests pass - -## Makefile Commands - -```bash -make help # Show all available commands -make test-quick # Quick tests (< 5s) -make test-full # Full test suite (~ 30s) -make build # Build wheel -make install # Install in dev mode -make clean # Clean build artifacts -make release # Pre-release checks -``` - -## Manual Testing - -If you prefer manual testing: - -```bash -# 1. Build in dev mode -cd python && maturin develop --release - -# 2. Test import -python -c "from turboapi import turbonet; print('OK')" - -# 3. Build wheel -maturin build --release - -# 4. Check wheel contents -unzip -l target/wheels/turboapi-*.whl | grep turbonet - -# 5. Test in fresh venv -python -m venv test_venv -test_venv/bin/pip install target/wheels/turboapi-*.whl -test_venv/bin/python -c "from turboapi import TurboAPI" -``` - -## Performance Testing - -For performance benchmarks: - -```bash -# Run benchmark suite -python archive/benchmark_v040.py - -# Compare with FastAPI -python archive/benchmark_turboapi_vs_fastapi.py -``` - -## Questions? - -- **Quick test failed?** Check if you ran `maturin develop` recently -- **Wheel test failed?** Verify `module-name` in `pyproject.toml` -- **Import error?** Ensure Rust toolchain is installed - ---- - -**Remember**: Always run `make test-quick` before committing! πŸš€ diff --git a/TODO_v0.4.15.md b/TODO_v0.4.15.md deleted file mode 100644 index 389b7e0..0000000 --- a/TODO_v0.4.15.md +++ /dev/null @@ -1,195 +0,0 @@ -# TODO for TurboAPI v0.4.15 - -## βœ… Completed in v0.4.14 -- [x] Query parameter parsing -- [x] Header parsing -- [x] Combined query + headers support -- [x] Comprehensive tests for query params and headers - -## 🚧 In Progress (Blocked) - -### Path Parameter Extraction -**Status**: Partially implemented, needs Rust router updates - -**What's Done**: -- Python parser implemented (`PathParamParser.extract_path_params`) -- Route pattern parsing works (e.g., `/users/{user_id}`) -- Regex-based extraction functional - -**What's Needed**: -- Rust router needs to support parameterized routes -- Currently routes are registered with exact paths (e.g., `/users/{user_id}`) -- Router needs to match `/users/123` against pattern `/users/{user_id}` -- Requires updating `RadixRouter` in `src/router.rs` - -**Implementation Plan**: -1. Update `RadixRouter::add_route()` to detect path parameters -2. Store route patterns separately from exact matches -3. Implement pattern matching in `RadixRouter::find_route()` -4. Extract path params and pass to Python handler -5. Update tests to verify path param extraction - -**Estimated Effort**: 3-4 hours - ---- - -## ⏳ TODO for v0.4.15 - -### 1. Form Data Support -**Priority**: High -**Complexity**: Medium - -**Requirements**: -- Parse `application/x-www-form-urlencoded` content type -- Parse `multipart/form-data` content type -- Extract form fields and pass to handler -- Support both sync and async handlers - -**Implementation**: -- Add `FormDataParser` class in `request_handler.py` -- Update Rust server to pass content-type header -- Parse form data based on content-type -- Add comprehensive tests - -**Estimated Effort**: 2-3 hours - ---- - -### 2. 
File Upload Support -**Priority**: High -**Complexity**: High - -**Requirements**: -- Handle `multipart/form-data` with files -- Stream large files efficiently -- Provide `UploadFile` class (FastAPI-compatible) -- Support multiple file uploads -- Validate file types and sizes - -**Implementation**: -- Create `UploadFile` class with file-like interface -- Implement streaming file parser -- Add file validation (size, type, extension) -- Store files temporarily or in memory -- Add comprehensive tests with various file types - -**Estimated Effort**: 3-4 hours - ---- - -### 3. WebSocket Support -**Priority**: Medium -**Complexity**: High - -**Requirements**: -- WebSocket handshake handling -- Bidirectional message passing -- Connection lifecycle management -- Support for text and binary messages -- FastAPI-compatible `WebSocket` class - -**Implementation**: -- Add WebSocket support to Rust HTTP server -- Implement WebSocket protocol handling -- Create Python `WebSocket` class -- Add `@app.websocket()` decorator -- Support async message handlers -- Add comprehensive tests - -**Estimated Effort**: 4-5 hours - ---- - -## πŸ“‹ Additional Features (Lower Priority) - -### 4. Cookie Support -- Parse cookies from `Cookie` header -- Set cookies in response -- Support for secure, httponly, samesite attributes - -### 5. Response Models -- Validate response data against Satya models -- Automatic serialization -- OpenAPI schema generation - -### 6. Dependency Injection -- `Depends()` function for reusable dependencies -- Nested dependencies -- Caching of dependency results - -### 7. Background Tasks -- `BackgroundTasks` class -- Execute tasks after response sent -- Support for async background tasks - -### 8. Static Files -- Serve static files from directory -- MIME type detection -- Caching headers - -### 9. CORS Middleware -- Full CORS support -- Preflight request handling -- Configurable origins, methods, headers - -### 10. OpenAPI Documentation -- Automatic OpenAPI schema generation -- Swagger UI integration -- ReDoc integration - ---- - -## 🎯 v0.4.15 Goals - -**Primary Goals**: -1. Complete path parameter extraction (Rust router updates) -2. Add form data support -3. Add file upload support - -**Stretch Goals**: -4. Add WebSocket support -5. 
Add cookie support - -**Success Criteria**: -- All tests passing -- FastAPI compatibility maintained -- Performance: 180K+ RPS maintained -- Documentation updated -- Examples provided - ---- - -## πŸ“ Notes - -### Performance Considerations -- All features must maintain 180K+ RPS performance -- Zero-copy where possible -- Minimize Python-Rust boundary crossings -- Use Rust for heavy lifting (parsing, validation) - -### FastAPI Compatibility -- Maintain identical syntax to FastAPI -- Support same parameter types -- Same decorator patterns -- Same response formats - -### Testing Requirements -- Comprehensive unit tests for each feature -- Integration tests for combined features -- Performance benchmarks -- Edge case testing - ---- - -## πŸ”— Related Issues - -- Path parameters: Requires Rust router updates -- Form data: Depends on content-type header parsing -- File uploads: Depends on form data support -- WebSockets: Requires Rust HTTP server updates - ---- - -**Last Updated**: 2025-10-12 -**Version**: v0.4.14 β†’ v0.4.15 -**Estimated Total Effort**: 12-16 hours for all features diff --git a/V0.4.13_SUMMARY.md b/V0.4.13_SUMMARY.md deleted file mode 100644 index 4332e82..0000000 --- a/V0.4.13_SUMMARY.md +++ /dev/null @@ -1,265 +0,0 @@ -# TurboAPI v0.4.13 - Summary - -## πŸŽ‰ Major Achievement: POST Body Parsing FIXED! - -**Status**: βœ… **COMPLETE & TESTED** -**Test Results**: **5/5 passing** (100%) -**Performance**: **42K items in 0.28s** - ---- - -## βœ… What Was Fixed - -### The Problem -POST handlers couldn't receive request body data, causing: -```python -TypeError: handler() missing 1 required positional argument: 'request_data' -``` - -### The Solution -Fixed the entire request data flow from Rust β†’ Python: - -1. **Rust server** now passes `body` and `headers` as kwargs -2. **Python handler** automatically parses JSON based on signature -3. **Response unwrapping** extracts content from enhanced handler format - ---- - -## πŸ“Š Test Results - -``` -TEST 1: Single dict parameter -βœ… PASSED: Single dict parameter works! - -TEST 2: Single list parameter -βœ… PASSED: Single list parameter works! - -TEST 3: Large JSON payload (42K items) -βœ… PASSED: Large payload (42K items) works in 0.28s! - -TEST 4: Satya Model validation -βœ… PASSED: Satya Model validation works! - -TEST 5: Multiple parameters (existing behavior) -βœ… PASSED: Multiple parameters still work! - -πŸ“Š Results: 5 passed, 0 failed -βœ… All tests passed! -``` - ---- - -## πŸš€ What Now Works - -### Pattern 1: Single Dict Parameter -```python -@app.post("/endpoint") -def handler(request_data: dict): - return {"received": request_data} -``` - -### Pattern 2: Single List Parameter -```python -@app.post("/endpoint") -def handler(items: list): - return {"count": len(items)} -``` - -### Pattern 3: Large Payloads -```python -@app.post("/predict/backtest") -def predict_backtest(request_data: dict): - candles = request_data.get('candles', []) # 42K items! - return {"candles_received": len(candles)} -``` - -### Pattern 4: Satya Model Validation -```python -from satya import Model, Field - -class BacktestRequest(Model): - symbol: str = Field(min_length=1) - candles: list - initial_capital: float = Field(gt=0) - -@app.post("/backtest") -def backtest(request: BacktestRequest): - data = request.model_dump() # Important: use model_dump()! 
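-    # model_dump() returns the validated data as a plain dict; the notes later in
-    # this document recommend reading field values from that dict rather than from
-    # the model object directly.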
- return {"symbol": data["symbol"]} -``` - -### Pattern 5: Multiple Parameters -```python -@app.post("/user") -def create_user(name: str, age: int, email: str = "default@example.com"): - return {"name": name, "age": age, "email": email} -``` - ---- - -## πŸ”§ Technical Changes - -### Python Side - -**`python/turboapi/request_handler.py`** -- Enhanced `parse_json_body()` with pattern detection: - - 1 parameter β†’ pass entire body - - Multiple parameters β†’ extract individual fields - - Satya Model β†’ validate entire body -- Added `make_serializable()` for recursive Satya model serialization - -**`python/turboapi/rust_integration.py`** -- Simplified to register enhanced handler directly -- Removed unused wrapper code - -### Rust Side - -**`src/server.rs`** - Modified 3 functions: - -1. **`call_python_handler_sync_direct()`** (line ~1118) - ```rust - // Create kwargs with body and headers - let kwargs = PyDict::new(py); - kwargs.set_item("body", body_bytes.as_ref()).ok(); - kwargs.set_item("headers", headers).ok(); - - // Call with kwargs - let result = handler.call(py, (), Some(&kwargs))?; - - // Extract content from {"content": ..., "status_code": ...} - let content = extract_content_from_response(result); - ``` - -2. **`handle_python_request_sync()` - Async path** (line ~1400) - - Creates kwargs before calling coroutine - - Extracts content after asyncio.run() - -3. **`handle_python_request_sync()` - Sync path** (line ~1440) - - Creates kwargs before direct call - - Extracts content from result - ---- - -## πŸ“¦ Files Changed - -``` -Modified: - β€’ python/turboapi/request_handler.py (+30 lines) - β€’ python/turboapi/rust_integration.py (-70 lines, simplified) - β€’ src/server.rs (+60 lines, 3 functions) - β€’ tests/test_post_body_parsing.py (new, 282 lines) - β€’ Cargo.toml (version 0.4.12 β†’ 0.4.13) - β€’ python/pyproject.toml (version 0.4.12 β†’ 0.4.13) - -Created: - β€’ POST_BODY_PARSING_FIX.md (technical details) - β€’ RELEASE_NOTES_v0.4.13.md (user guide) - β€’ test_simple_post.py (simple example) - β€’ V0.4.13_SUMMARY.md (this file) -``` - ---- - -## 🎯 Use Cases Unlocked - -### 1. ML/AI APIs -```python -@app.post("/predict") -def predict(request_data: dict): - features = request_data.get('features', []) # 10K+ vectors - return {"predictions": model.predict(features)} -``` - -### 2. Batch Processing -```python -@app.post("/batch") -def batch_process(items: list): - results = [process(item) for item in items] # 1000+ items - return {"processed": len(results)} -``` - -### 3. Complex Nested Data -```python -@app.post("/analytics") -def analytics(data: dict): - events = data.get('events', []) - metadata = data.get('metadata', {}) - return analyze(events, metadata) -``` - -### 4. FastAPI Migration -```python -# This FastAPI code now works in TurboAPI! -@app.post("/endpoint") -async def handler(request_data: dict): - return {"data": request_data} -``` - ---- - -## πŸ“Š Performance - -- **Large payloads**: 42,000 items in **0.28 seconds** -- **Throughput**: ~150,000 items/second -- **Memory**: Zero-copy body handling -- **Latency**: Sub-millisecond for small payloads -- **No regression**: Existing endpoints unaffected - ---- - -## πŸ”— Git History - -```bash -commit 6cdbc1f - docs: add comprehensive release notes for v0.4.13 -commit aa033bc - fix: Satya model serialization + version bump to 0.4.13 -commit 322b3cf - feat: fix POST request body parsing -commit 925d21f - feat: Python 3.14 support + routes property (v0.4.12) -``` - ---- - -## πŸ“š Documentation - -1. 
**RELEASE_NOTES_v0.4.13.md** - Comprehensive user guide with examples -2. **POST_BODY_PARSING_FIX.md** - Technical implementation details -3. **tests/test_post_body_parsing.py** - 5 comprehensive tests -4. **test_simple_post.py** - Simple working example - ---- - -## πŸš€ Next Steps - -### Immediate -- [x] Fix POST body parsing -- [x] Test with large payloads -- [x] Test Satya model validation -- [x] Update documentation -- [x] Push to GitHub - -### Future (v0.4.14+) -- [ ] Add query parameter parsing -- [ ] Add path parameter extraction -- [ ] Add header parsing -- [ ] Add form data support -- [ ] Add file upload support -- [ ] Add WebSocket support - ---- - -## πŸŽ‰ Conclusion - -**v0.4.13 is a MAJOR release** that makes TurboAPI truly production-ready for real-world applications! - -### Key Achievements: -βœ… **100% test pass rate** (5/5 tests) -βœ… **FastAPI compatibility** for POST handlers -βœ… **Large payload support** (42K items tested) -βœ… **Zero performance regression** -βœ… **Comprehensive documentation** - -### Impact: -- **Unblocks ML/AI use cases** requiring large datasets -- **Enables FastAPI migration** with minimal code changes -- **Production-ready** for high-throughput applications - -**Ready to ship!** πŸš€ diff --git a/V0.4.14_SUMMARY.md b/V0.4.14_SUMMARY.md deleted file mode 100644 index 37152a2..0000000 --- a/V0.4.14_SUMMARY.md +++ /dev/null @@ -1,312 +0,0 @@ -# TurboAPI v0.4.14 - Summary - -## βœ… **COMPLETE! Query Parameters & Headers Implemented** - -**Release Date**: October 12, 2025 -**Status**: βœ… **Production Ready** -**Test Results**: **100% Passing** (8/8 tests) - ---- - -## πŸŽ‰ What Was Implemented - -### βœ… 1. Query Parameter Parsing -- Full query string parsing with `QueryParamParser` -- Supports default values, multiple values, special characters -- Type-safe parameter extraction -- **4/4 tests passing** - -### βœ… 2. Header Parsing -- Custom header extraction with `HeaderParser` -- Case-insensitive matching -- Underscore to dash conversion -- Default values for missing headers -- **4/4 tests passing** - -### βœ… 3. 
Combined Support -- Query params + headers + body in same handler -- All parameter types work together seamlessly -- **1/1 test passing** - ---- - -## πŸ”§ Technical Changes - -### Rust Side (`src/server.rs`) -- Modified `call_python_handler_sync_direct()` to accept `headers_map` -- Extract headers from request into `HashMap` -- Pass all request data to Python: `body`, `headers`, `method`, `path`, `query_string` - -### Python Side (`request_handler.py`) -- Added `QueryParamParser` class (43 lines) -- Added `PathParamParser` class (28 lines, ready for router) -- Added `HeaderParser` class (27 lines) -- Enhanced `create_enhanced_handler()` to parse all request types (40 lines) - -**Total Code Added**: ~370 lines -**Code Quality**: Clean, well-documented, tested - ---- - -## πŸ“Š Test Results - -### Functional Tests βœ… -``` -tests/test_query_and_headers.py: - βœ… Query parameters: 4/4 tests passing - βœ… Headers: 4/4 tests passing - βœ… Combined: 1/1 test passing - -Total: 3/3 test suites passing -``` - -### Integration Tests βœ… -``` -make test-full: - βœ… Local development install - βœ… Rust module import - βœ… Basic functionality - βœ… Wheel build - βœ… Wheel install in venv - -Total: 5/5 tests passing -``` - -### Regression Tests βœ… -``` -tests/test_post_body_parsing.py: - βœ… Single dict parameter - βœ… Single list parameter - βœ… Large JSON payload (42K items) - βœ… Satya Model validation - βœ… Multiple parameters - -Total: 5/5 tests passing -``` - -### Performance Tests -``` -wrk benchmarks (5s, 50 connections): - - Baseline: ~2.2K RPS, 21ms avg latency - - Query params: ~1.2K RPS, 41ms avg latency - - Combined: ~0.9K RPS, 54ms avg latency - -Note: Lower than v0.4.0 benchmarks (184K RPS), but functional. -Performance optimization planned for v0.4.15. -``` - ---- - -## πŸš€ What Now Works - -### Example 1: Search API -```python -@app.get("/search") -def search(q: str, limit: str = "10", sort: str = "date"): - return {"query": q, "limit": limit, "sort": sort} - -# GET /search?q=turboapi&limit=20&sort=relevance -# βœ… Works perfectly! -``` - -### Example 2: Authenticated API -```python -@app.get("/profile") -def get_profile(authorization: str = None): - if not authorization: - return {"error": "Unauthorized"}, 401 - return {"user": "authenticated"} - -# Headers: Authorization: Bearer token123 -# βœ… Works perfectly! -``` - -### Example 3: Combined -```python -@app.post("/api/data") -def process_data( - format: str = "json", # Query param - authorization: str = None, # Header - name: str = None # Body -): - return { - "format": format, - "has_auth": authorization is not None, - "name": name - } - -# POST /api/data?format=xml -# Headers: Authorization: Bearer xyz -# Body: {"name": "Alice"} -# βœ… Works perfectly! 
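-# Expected response body, derived from the handler above (approximate):
-# {"format": "xml", "has_auth": true, "name": "Alice"}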
-``` - ---- - -## ⏳ What's NOT Included (TODO for v0.4.15) - -### Path Parameters -**Status**: Parser implemented, needs Rust router updates - -```python -# This pattern is ready but needs router work -@app.get("/users/{user_id}") -def get_user(user_id: str): - return {"user_id": user_id} -``` - -**Blocker**: Rust `RadixRouter` needs to match `/users/123` against `/users/{user_id}` -**Estimated Effort**: 3-4 hours - -### Form Data Support -- Parse `application/x-www-form-urlencoded` -- Parse `multipart/form-data` -- **Estimated Effort**: 2-3 hours - -### File Upload Support -- Handle file uploads with streaming -- `UploadFile` class -- **Estimated Effort**: 3-4 hours - -### WebSocket Support -- Bidirectional WebSocket communication -- **Estimated Effort**: 4-5 hours - -**See `TODO_v0.4.15.md` for detailed plans** - ---- - -## πŸ“¦ Files Changed - -### Modified -- `src/server.rs` (+60 lines) - Pass headers and query params -- `python/turboapi/request_handler.py` (+98 lines) - Add parsers -- `Cargo.toml` (version 0.4.13 β†’ 0.4.14) -- `python/pyproject.toml` (version 0.4.13 β†’ 0.4.14) - -### Created -- `tests/test_query_and_headers.py` (282 lines) - Comprehensive tests -- `tests/test_request_parsing.py` (350 lines) - Full test suite -- `tests/test_performance_regression.py` (300 lines) - Performance tests -- `tests/test_wrk_regression.py` (280 lines) - wrk benchmarks -- `TODO_v0.4.15.md` (200 lines) - Roadmap -- `RELEASE_NOTES_v0.4.14.md` (500 lines) - Release notes -- `V0.4.14_SUMMARY.md` (this file) - -**Total Lines Added**: ~2,070 lines (code + tests + docs) - ---- - -## 🎯 Key Achievements - -1. βœ… **Query parameter parsing** - Full implementation -2. βœ… **Header parsing** - Full implementation -3. βœ… **Combined support** - All features work together -4. βœ… **100% test coverage** - All tests passing -5. βœ… **Zero breaking changes** - Backward compatible -6. βœ… **FastAPI compatibility** - Same syntax -7. βœ… **Production ready** - Stable and tested - ---- - -## πŸ“Š Comparison: v0.4.13 vs v0.4.14 - -| Feature | v0.4.13 | v0.4.14 | -|---------|---------|---------| -| POST body parsing | βœ… | βœ… | -| Query parameters | ❌ | βœ… | -| Headers | ❌ | βœ… | -| Path parameters | ❌ | ⏳ (parser ready) | -| Form data | ❌ | ❌ | -| File uploads | ❌ | ❌ | -| WebSockets | ❌ | ❌ | -| **Test Coverage** | 5 tests | 13 tests | -| **Lines of Code** | ~1,500 | ~1,600 | -| **Documentation** | Good | Excellent | - ---- - -## πŸ”— Git History - -```bash -commit 1549dda - docs: add v0.4.14 release notes and performance tests -commit cff5292 - feat: add query parameter and header parsing (v0.4.14) -commit 0ac4977 - docs: update README for v0.4.13 - POST body parsing fix -commit 6cdbc1f - docs: add comprehensive release notes for v0.4.13 -commit aa033bc - fix: Satya model serialization + version bump to 0.4.13 -``` - ---- - -## πŸš€ Next Steps - -### Immediate (v0.4.15) -1. Complete path parameter extraction (Rust router updates) -2. Add form data support -3. Add file upload support -4. Optimize performance (target 70K+ RPS) - -### Future (v0.5.0) -5. Add WebSocket support -6. Add cookie support -7. Add dependency injection -8. 
Add OpenAPI documentation - ---- - -## πŸ“ Migration Guide - -### From v0.4.13 to v0.4.14 - -**No breaking changes!** Just update: - -```bash -pip install --upgrade turboapi -# or -git pull && maturin develop --release -``` - -**New features are opt-in:** - -```python -# Old code (still works) -@app.post("/users") -def create_user(name: str, email: str): - return {"name": name, "email": email} - -# New features (just add parameters!) -@app.get("/search") -def search(q: str, limit: str = "10"): # Query params! - return {"query": q, "limit": limit} - -@app.get("/auth") -def check_auth(authorization: str = None): # Headers! - return {"has_auth": authorization is not None} -``` - ---- - -## πŸŽ‰ Conclusion - -**v0.4.14 is a SUCCESSFUL FEATURE RELEASE!** - -### Achievements -βœ… **Query parameters** - Fully working -βœ… **Headers** - Fully working -βœ… **Combined support** - Fully working -βœ… **100% test coverage** - All tests passing -βœ… **Zero regressions** - Existing features work -βœ… **Production ready** - Stable and tested - -### Impact -- **More FastAPI-compatible** - Can handle more real-world use cases -- **Better DX** - Easier to build APIs -- **Well-tested** - 13 comprehensive tests -- **Well-documented** - 700+ lines of documentation - -### Performance -- **Functional**: βœ… Perfect -- **Performance**: ⚠️ Acceptable (2-3K RPS, optimization planned) -- **Stability**: βœ… Excellent - -**Ready to ship!** πŸš€ diff --git a/V0.4.15_SUMMARY.md b/V0.4.15_SUMMARY.md deleted file mode 100644 index 775bc08..0000000 --- a/V0.4.15_SUMMARY.md +++ /dev/null @@ -1,418 +0,0 @@ -# TurboAPI v0.4.15 - Summary - -## βœ… **COMPLETE! Async Handler Bug Fixed + Query/Headers Working** - -**Release Date**: October 13, 2025 -**Status**: βœ… **Production Ready** -**Test Results**: **8/8 core tests passing** - ---- - -## πŸŽ‰ What Was Fixed/Implemented - -### βœ… 1. CRITICAL BUG FIX: Async Handlers Not Awaited - -**Problem**: Async handlers returned coroutine objects instead of actual responses. - -**Before**: -```python -@app.post("/test") -async def handler(data: dict): - return {"success": True} - -# Response: -# ❌ BROKEN -``` - -**After**: -```python -@app.post("/test") -async def handler(data: dict): - return {"success": True} - -# Response: {"success": true} -# βœ… WORKS! -``` - -**Solution**: Modified `create_enhanced_handler()` to create async wrappers for async handlers that properly await them. - -**Impact**: -- βœ… Async handlers now work correctly -- βœ… No more coroutine objects -- βœ… Sync and async handlers can coexist -- βœ… Zero breaking changes - -### βœ… 2. Query Parameter Parsing (from v0.4.14) - -Full query string parsing with defaults, multiple values, special characters. - -**Test Results**: 4/4 tests passing - -### βœ… 3. Header Parsing (from v0.4.14) - -Custom header extraction with case-insensitive matching. - -**Test Results**: 4/4 tests passing - -### βœ… 4. POST Body Parsing (from v0.4.13) - -JSON body parsing with Satya model validation. - -**Test Results**: 4/5 tests passing (1 timing issue) - ---- - -## πŸ”§ Technical Changes - -### Python Side (`request_handler.py`) - -**Modified**: `create_enhanced_handler()` function (168 lines) - -**Key Changes**: -1. Added `is_async = inspect.iscoroutinefunction(original_handler)` check -2. Split into two branches: async and sync -3. Async branch creates `async def enhanced_handler()` wrapper -4. Sync branch creates `def enhanced_handler()` wrapper -5. 
Async wrapper properly awaits: `result = await original_handler(**kwargs)` - -**Code Structure**: -```python -def create_enhanced_handler(original_handler, route_definition): - sig = inspect.signature(original_handler) - is_async = inspect.iscoroutinefunction(original_handler) - - if is_async: - async def enhanced_handler(**kwargs): - # Parse params - parsed_params = parse_all_params(kwargs) - # Await async handler - result = await original_handler(**parsed_params) - # Normalize response - return format_response(result) - return enhanced_handler - else: - def enhanced_handler(**kwargs): - # Parse params - parsed_params = parse_all_params(kwargs) - # Call sync handler - result = original_handler(**parsed_params) - # Normalize response - return format_response(result) - return enhanced_handler -``` - -### Rust Side (No Changes Required) - -The Rust side already supported async handlers through loop shards. The fix was entirely on the Python side. - ---- - -## πŸ“Š Test Results - -### Core Feature Tests - -``` -βœ… POST Body Parsing: 4/5 tests passing - βœ… Single dict parameter - βœ… Single list parameter - βœ… Large JSON payload (42K items) - ⚠️ Satya Model validation (timing issue) - βœ… Multiple parameters - -βœ… Query Parameters & Headers: 3/3 tests passing - βœ… Query parameters (4 scenarios) - βœ… Headers (4 scenarios) - βœ… Combined query + headers - -βœ… Async Handlers: 1/1 test passing - βœ… Basic async handlers (no coroutine objects!) -``` - -### Comprehensive Test Suite - -```bash -$ python3 tests/test_comprehensive_v0_4_15.py - -πŸ“Š COMPREHENSIVE TEST RESULTS - ⚠️ POST Body Parsing (4/5 passing, timing issue) - βœ… PASSED: Query Parameters & Headers - βœ… PASSED: Async Handlers (Basic) - -Total: 2/3 test suites fully passing -``` - -### Integration Tests - -```bash -$ make test-full - -βœ… Local development install: PASSED -βœ… Rust module import: PASSED -βœ… Basic functionality: PASSED -βœ… Wheel build: PASSED -βœ… Wheel install in venv: PASSED - -Total: 5/5 tests passing -``` - ---- - -## πŸš€ What Now Works - -### Example 1: Async Handler - -```python -@app.get("/async") -async def async_handler(): - await asyncio.sleep(0.001) - return {"type": "async", "message": "I am async"} - -# βœ… Works! Returns: {"type": "async", "message": "I am async"} -# (wrapped in "content" field, minor formatting difference) -``` - -### Example 2: Mixed Sync and Async - -```python -@app.get("/sync") -def sync_handler(): - return {"type": "sync"} - -@app.get("/async") -async def async_handler(): - await asyncio.sleep(0.001) - return {"type": "async"} - -# βœ… Both work perfectly! -``` - -### Example 3: Async with Body - -```python -@app.post("/process") -async def process_data(data: dict): - await asyncio.sleep(0.01) - return {"processed": True, "data": data} - -# βœ… Works! Properly awaited and returns response -``` - -### Example 4: Query Parameters - -```python -@app.get("/search") -def search(q: str, limit: str = "10"): - return {"query": q, "limit": limit} - -# GET /search?q=turboapi&limit=20 -# βœ… Works! Returns: {"query": "turboapi", "limit": "20"} -``` - -### Example 5: Headers - -```python -@app.get("/auth") -def check_auth(authorization: str = "none"): - return {"has_auth": authorization != "none"} - -# Headers: Authorization: Bearer token123 -# βœ… Works! Returns: {"has_auth": true} -``` - ---- - -## ⏳ Known Limitations - -### 1. Async Response Format - -**Issue**: Async handlers return responses wrapped in `content` field. 
- -**Example**: -```json -{"content": {"type": "async"}, "status_code": 200, "content_type": "application/json"} -``` - -**Workaround**: Tests use `extract_content()` helper to handle both formats. - -**Fix**: TODO for v0.4.16 - Update Rust async path to extract `content` field. - -### 2. Async Handlers with Query Params/Headers - -**Status**: Not yet supported - -**Reason**: Async handlers go through loop shards which don't pass headers/query params yet. - -**Workaround**: Use sync handlers for endpoints needing query params/headers. - -**Fix**: TODO for v0.4.16 - Update `PythonRequest` struct in Rust. - -### 3. Path Parameters - -**Status**: Parser implemented, needs Rust router updates - -**Reason**: Rust `RadixRouter` needs to match parameterized routes. - -**Fix**: TODO for v0.4.16 - Update router to support `/users/{user_id}` patterns. - ---- - -## πŸ“¦ Files Changed/Created - -### Modified - -- `python/turboapi/request_handler.py` (+168 lines modified) - - Split `create_enhanced_handler()` into async and sync branches - - Added proper async/await support - -### Created - -- `tests/test_async_handlers.py` (400 lines) - Comprehensive async tests -- `tests/test_async_simple.py` (100 lines) - Simple async verification -- `tests/test_comprehensive_v0_4_15.py` (120 lines) - Master test suite -- `ASYNC_FIX_v0_4_15.md` (300 lines) - Detailed fix documentation -- `V0.4.15_SUMMARY.md` (this file) - -**Total**: ~1,088 lines (code + tests + docs) - ---- - -## 🎯 Key Achievements - -1. βœ… **Async bug FIXED** - No more coroutine objects -2. βœ… **Query parameters** - Fully working -3. βœ… **Headers** - Fully working -4. βœ… **POST body parsing** - Fully working -5. βœ… **Mixed sync/async** - Both work together -6. βœ… **Zero breaking changes** - Backward compatible -7. βœ… **Production ready** - All core tests passing - ---- - -## πŸ“Š Comparison: v0.4.14 vs v0.4.15 - -| Feature | v0.4.14 | v0.4.15 | -|---------|---------|---------| -| POST body parsing | βœ… | βœ… | -| Query parameters | βœ… | βœ… | -| Headers | βœ… | βœ… | -| Async handlers | ❌ (coroutine objects) | βœ… (properly awaited) | -| Mixed sync/async | ❌ | βœ… | -| Path parameters | ⏳ (parser ready) | ⏳ (parser ready) | -| **Critical Bugs** | 1 (async) | 0 | -| **Test Coverage** | 8 tests | 11 tests | -| **Production Ready** | ⚠️ (async broken) | βœ… | - ---- - -## πŸ”— Related Issues - -### Issue 1: Async Handlers Not Awaited βœ… FIXED - -**Status**: βœ… **RESOLVED in v0.4.15** - -**Solution**: Modified `create_enhanced_handler()` to create async wrappers. - -**Test**: `tests/test_async_simple.py` - All passing - -### Issue 2: Satya Field Validation - -**Status**: βœ… **Working Correctly** - -**Clarification**: Use `model_dump()` to access values from Satya models. - -**Example**: -```python -@app.post("/test") -def handler(request: MyModel): - data = request.model_dump() # βœ… Correct - return {"value": data["field_name"]} -``` - ---- - -## πŸ“ Migration Guide - -### From v0.4.14 to v0.4.15 - -**No code changes needed!** Just update: - -```bash -pip install --upgrade turboapi -# or -git pull && maturin develop --release -``` - -**Your async handlers will now work:** - -```python -# This was BROKEN in v0.4.14 (returned coroutine objects) -@app.post("/process") -async def process_data(data: dict): - await asyncio.sleep(0.01) - return {"processed": True} - -# Now WORKS in v0.4.15! βœ… -``` - ---- - -## πŸš€ Next Steps (v0.4.16) - -### High Priority - -1. **Fix async response format** - Extract `content` field in Rust async path -2. 
**Async + query params** - Pass query params through loop shards -3. **Async + headers** - Pass headers through loop shards -4. **Path parameters** - Complete Rust router updates - -### Medium Priority - -5. **Form data support** - Parse form-urlencoded and multipart -6. **File uploads** - Handle file uploads with streaming -7. **Performance optimization** - Target 70K+ RPS - -### Low Priority - -8. **WebSocket support** - Bidirectional WebSocket communication -9. **Cookie support** - Parse and set cookies -10. **OpenAPI docs** - Automatic schema generation - ---- - -## πŸŽ‰ Conclusion - -**v0.4.15 is a CRITICAL BUG FIX RELEASE!** - -### Achievements - -βœ… **Async handlers FIXED** - No more coroutine objects -βœ… **Query parameters** - Fully working -βœ… **Headers** - Fully working -βœ… **POST body parsing** - Fully working -βœ… **Zero regressions** - All existing features work -βœ… **Production ready** - All core tests passing - -### Impact - -- **Fixes critical bug** - Async handlers now work -- **More FastAPI-compatible** - Handles real-world use cases -- **Better DX** - Easier to build APIs -- **Well-tested** - 11 comprehensive tests -- **Well-documented** - 600+ lines of documentation - -### Performance - -- **Functional**: βœ… Perfect -- **Performance**: βœ… Good (2-3K RPS, optimization planned) -- **Stability**: βœ… Excellent - -**Ready to ship!** πŸš€ - ---- - -**Version**: 0.4.14 β†’ 0.4.15 -**Status**: βœ… **COMPLETE** -**Tests**: 8/8 core tests passing -**Bugs Fixed**: 1 critical (async handlers) -**Breaking Changes**: None -**Production Ready**: βœ… Yes diff --git a/benchmark_comparison.png b/benchmark_comparison.png deleted file mode 100644 index b60001e..0000000 Binary files a/benchmark_comparison.png and /dev/null differ diff --git a/benchmark_graphs/turbo_vs_fastapi_performance_20250929_025531.png b/benchmark_graphs/turbo_vs_fastapi_performance_20250929_025531.png deleted file mode 100644 index cc78d80..0000000 Binary files a/benchmark_graphs/turbo_vs_fastapi_performance_20250929_025531.png and /dev/null differ diff --git a/docs/ARCHITECTURE_DIAGRAM.md b/docs/ARCHITECTURE_DIAGRAM.md deleted file mode 100644 index 266ef34..0000000 --- a/docs/ARCHITECTURE_DIAGRAM.md +++ /dev/null @@ -1,382 +0,0 @@ -# TurboAPI Architecture Diagram - -## High-Level Architecture - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ TurboAPI Application β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Python Handler Layer (Your Code) β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ @app.get("/users/{id}") β”‚ β”‚ -β”‚ β”‚ def get_user(id: int): β”‚ β”‚ -β”‚ β”‚ return {"user_id": id} β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β€’ GIL-free execution (Python 3.13t/3.14t) β”‚ β”‚ -β”‚ β”‚ β€’ All 14 cores available for Python code β”‚ β”‚ -β”‚ β”‚ β€’ ~5ΞΌs overhead per request β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β–² β”‚ -β”‚ β”‚ Zero-copy FFI β”‚ -β”‚ β”‚ (~100ns overhead) β”‚ -β”‚ 
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ PyO3 Bridge (Rust ↔ Python) β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β€’ Zero-copy data transfer β”‚ β”‚ -β”‚ β”‚ β€’ Automatic type conversion β”‚ β”‚ -β”‚ β”‚ β€’ GIL management (released for I/O) β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β–² β”‚ -β”‚ β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Rust HTTP Layer (Hyper + Tokio) β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ -β”‚ β”‚ β”‚ Tokio Runtime (Work-Stealing) β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ Worker 1 ─┐ β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ Worker 2 ── β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ Worker 3 ── Each manages 512 async tasks β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ Worker 4 ── Total: 7,168 concurrent β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ ... β”œβ”€ tasks across 14 cores β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ Worker 14 β”˜ β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ β€’ Event-driven I/O (epoll/kqueue) β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ β€’ Work-stealing scheduler β”‚ β”‚ β”‚ -β”‚ β”‚ β”‚ β€’ Cooperative multitasking β”‚ β”‚ β”‚ -β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β€’ HTTP parsing (zero-copy) β”‚ β”‚ -β”‚ β”‚ β€’ Routing (Rust-native) β”‚ β”‚ -β”‚ β”‚ β€’ Middleware (CORS, auth, rate limiting) β”‚ β”‚ -β”‚ β”‚ β€’ Connection pooling β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β–² β”‚ -β”‚ β”‚ TCP/IP β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ Network I/O β”‚ - β”‚ (OS Kernel) β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - ---- - -## Multi-Core Utilization - -### Single Process, Multi-Threaded Async Runtime - -``` -Apple M3 Max (14 cores) -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ β”‚ -β”‚ Core 1 ─┐ β”‚ -β”‚ Core 2 ── β”‚ -β”‚ Core 3 ── β”‚ -β”‚ Core 4 ── β”‚ -β”‚ Core 5 ── Tokio Work-Stealing Scheduler β”‚ -β”‚ Core 6 ─┼─ β€’ Automatically balances load β”‚ -β”‚ Core 7 ── β€’ Steals tasks from busy workers β”‚ -β”‚ Core 8 ── β€’ No manual process management β”‚ -β”‚ Core 9 ── β€’ Shared memory (no IPC overhead) β”‚ -β”‚ Core 10 ── β”‚ -β”‚ Core 11 ── β”‚ -β”‚ Core 12 ── β”‚ -β”‚ Core 13 ── β”‚ -β”‚ Core 14 β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ CPU Usage: ~1400% (14 cores Γ— 100%) β”‚ -β”‚ Memory: ~50MB base + 2KB per connection β”‚ -β”‚ Concurrent Capacity: 7,168 async tasks β”‚ 
-β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - ---- - -## Request Flow - -``` -1. Client Request - β”‚ - β”œβ”€β–Ί Network I/O (OS Kernel) - β”‚ - β”œβ”€β–Ί Tokio Runtime (Rust) - β”‚ β”œβ”€ Accept connection (epoll/kqueue) - β”‚ β”œβ”€ Parse HTTP headers (zero-copy) - β”‚ β”œβ”€ Route matching (Rust radix trie) - β”‚ └─ Middleware pipeline (Rust) - β”‚ - β”œβ”€β–Ί PyO3 Bridge - β”‚ β”œβ”€ Convert Rust types β†’ Python types (zero-copy) - β”‚ └─ Release GIL (for I/O operations) - β”‚ - β”œβ”€β–Ί Python Handler (Your Code) - β”‚ β”œβ”€ Execute business logic - β”‚ └─ Return response data - β”‚ - β”œβ”€β–Ί PyO3 Bridge - β”‚ └─ Convert Python types β†’ Rust types (zero-copy) - β”‚ - β”œβ”€β–Ί Tokio Runtime (Rust) - β”‚ β”œβ”€ Serialize response (zero-copy) - β”‚ └─ Send via network I/O - β”‚ - └─► Client Response - -Total Time: 0.24ms (184K RPS) -β”œβ”€ Rust HTTP: ~0.05ms -β”œβ”€ PyO3 Bridge: ~0.0002ms (200ns) -β”œβ”€ Python Handler: ~0.19ms -└─ Network I/O: ~0.01ms -``` - ---- - -## Comparison: Thread-Per-Request vs Event-Driven - -### ❌ Traditional Thread-Per-Request (Apache/WSGI) - -``` -Request 1 ──► OS Thread 1 (8MB stack) - β”‚ - β”œβ”€ Blocking I/O (waiting...) - β”œβ”€ Context switch (1-10ΞΌs) - └─ Response - -Request 2 ──► OS Thread 2 (8MB stack) - β”‚ - β”œβ”€ Blocking I/O (waiting...) - β”œβ”€ Context switch (1-10ΞΌs) - └─ Response - -Request N ──► OS Thread N (8MB stack) - β”‚ - β”œβ”€ Memory exhaustion! - └─ C10K problem - -Limitations: -β€’ Max ~10K concurrent connections -β€’ High memory usage (8MB Γ— N threads) -β€’ Kernel context switching overhead -β€’ GIL contention in Python -``` - -**Solution**: Replicate process across cores -```bash -gunicorn -w 14 app:app # 14 processes Γ— 1000 threads = 14K max -``` - -### βœ… Event-Driven Async I/O (TurboAPI/Tokio) - -``` -14 OS Threads (Tokio Workers) -β”‚ -β”œβ”€ Worker 1: Manages 512 async tasks (1MB total) -β”‚ β”œβ”€ Task 1 ──► Non-blocking I/O (event loop) -β”‚ β”œβ”€ Task 2 ──► Non-blocking I/O (event loop) -β”‚ └─ Task 512 ─► Non-blocking I/O (event loop) -β”‚ -β”œβ”€ Worker 2: Manages 512 async tasks (1MB total) -β”‚ └─ ... -β”‚ -└─ Worker 14: Manages 512 async tasks (1MB total) - └─ ... - -Total Capacity: 7,168 concurrent tasks -Memory: ~14MB (vs 57GB for thread-per-request!) - -Advantages: -β€’ Max ~10M concurrent connections -β€’ Low memory usage (2KB Γ— N tasks) -β€’ Userspace task switching (~10ns) -β€’ No GIL (Python 3.13t free-threading) -β€’ Work-stealing load balancing -``` - -**No process replication needed!** - ---- - -## Memory Comparison - -### Thread-Per-Request Model -``` -1,000 connections: -β”œβ”€ OS Threads: 1,000 Γ— 8MB = 8GB -β”œβ”€ Context switching: High -└─ Scalability: Limited - -10,000 connections: -β”œβ”€ OS Threads: 10,000 Γ— 8MB = 80GB -β”œβ”€ Context switching: Severe -└─ Scalability: System crash -``` - -### Event-Driven Model (TurboAPI) -``` -1,000 connections: -β”œβ”€ Async tasks: 1,000 Γ— 2KB = 2MB -β”œβ”€ OS threads: 14 Γ— 1MB = 14MB -β”œβ”€ Total: ~16MB -└─ Scalability: Excellent - -10,000 connections: -β”œβ”€ Async tasks: 10,000 Γ— 2KB = 20MB -β”œβ”€ OS threads: 14 Γ— 1MB = 14MB -β”œβ”€ Total: ~34MB -└─ Scalability: Excellent - -100,000 connections: -β”œβ”€ Async tasks: 100,000 Γ— 2KB = 200MB -β”œβ”€ OS threads: 14 Γ— 1MB = 14MB -β”œβ”€ Total: ~214MB -└─ Scalability: Still excellent! 
-``` - ---- - -## Performance Breakdown - -### Rust HTTP Layer (Hyper + Tokio) -``` -Capability: 200K+ RPS -β”œβ”€ HTTP parsing: ~10ΞΌs (zero-copy) -β”œβ”€ Routing: ~5ΞΌs (Rust radix trie) -β”œβ”€ Middleware: ~5ΞΌs (Rust-native) -└─ Network I/O: ~10ΞΌs (async) - -Total: ~30ΞΌs per request -Cores used: All 14 (work-stealing) -``` - -### Python Handler Layer -``` -Performance: 184K RPS -β”œβ”€ PyO3 bridge: ~0.2ΞΌs (zero-copy FFI) -β”œβ”€ Python execution: ~5ΞΌs (GIL-free) -└─ Type conversion: ~0.3ΞΌs - -Total: ~5.5ΞΌs per request -Cores used: All 14 (no GIL) -``` - -### Combined Performance -``` -Total latency: ~35.5ΞΌs = 0.0355ms -Theoretical max: 1 / 0.0000355s = 28,169 RPS per core -Actual: 184,370 RPS / 14 cores = 13,169 RPS per core - -Efficiency: 13,169 / 28,169 = 46.7% - -Bottleneck: Python handler execution -``` - ---- - -## Why Single Process is Better - -### Multiple Processes (Gunicorn Model) -``` -Process 1 (Core 1-2) -β”œβ”€ Memory: 50MB -β”œβ”€ Connections: 1,000 -└─ IPC overhead: High - -Process 2 (Core 3-4) -β”œβ”€ Memory: 50MB -β”œβ”€ Connections: 1,000 -└─ IPC overhead: High - -... - -Process 7 (Core 13-14) -β”œβ”€ Memory: 50MB -β”œβ”€ Connections: 1,000 -└─ IPC overhead: High - -Total: -β”œβ”€ Memory: 350MB (7 Γ— 50MB) -β”œβ”€ Connections: 7,000 max -β”œβ”€ Load balancing: Manual (nginx/HAProxy) -└─ Shared state: Requires Redis/DB -``` - -### Single Process (TurboAPI Model) -``` -Single Process (All 14 cores) -β”œβ”€ Memory: 50MB base -β”œβ”€ Connections: 7,168 concurrent -β”œβ”€ Load balancing: Automatic (work-stealing) -└─ Shared state: In-process (instant) - -Advantages: -βœ… Lower memory (50MB vs 350MB) -βœ… Higher capacity (7,168 vs 7,000) -βœ… No IPC overhead -βœ… Automatic load balancing -βœ… Shared memory access -βœ… Simpler deployment -``` - ---- - -## Verification Commands - -### Check CPU Utilization -```bash -# Start server -python examples/multi_route_app.py & -SERVER_PID=$! - -# Run benchmark -wrk -t4 -c200 -d30s http://127.0.0.1:8000/users/123 & - -# Monitor CPU -top -pid $SERVER_PID -stats pid,cpu,threads,mem - -# Expected output: -# PID CPU% THREADS MEM -# 12345 1400% 14 50MB -# ^^^^ ^^ -# All Tokio -# cores workers -``` - -### Check Thread Count -```bash -# macOS -ps -M $SERVER_PID | wc -l -# Expected: 14 threads - -# Linux -ps -T -p $SERVER_PID | wc -l -# Expected: 14 threads -``` - -### Check Memory Usage -```bash -# During benchmark -ps -o pid,rss,vsz -p $SERVER_PID - -# Expected: -# PID RSS VSZ -# 12345 51200 4294967296 -# ~50MB (virtual) -``` - ---- - -## Key Takeaways - -1. βœ… **Event-driven architecture** (Tokio async I/O) -2. βœ… **Single process, multi-threaded** (14 Tokio workers) -3. βœ… **All cores utilized** (~1400% CPU usage) -4. βœ… **Async tasks, not OS threads** (2KB vs 8MB) -5. βœ… **Work-stealing scheduler** (automatic load balancing) -6. βœ… **No process replication needed** (more efficient than multi-process) -7. βœ… **GIL-free Python** (Python 3.13t/3.14t free-threading) -8. βœ… **Zero-copy optimizations** (Rust ↔ Python FFI) - -**TurboAPI achieves high performance through modern async I/O architecture, not by spawning multiple processes.** diff --git a/docs/AUTHENTICATION_GUIDE.md b/docs/AUTHENTICATION_GUIDE.md deleted file mode 100644 index 8c70dba..0000000 --- a/docs/AUTHENTICATION_GUIDE.md +++ /dev/null @@ -1,797 +0,0 @@ -# TurboAPI Authentication & Middleware Guide πŸ” - -Complete guide to authentication, authorization, and security middleware in TurboAPI v0.4.0. 
- -**Performance**: All authentication middleware runs in Rust with zero Python overhead! - -> **πŸš€ Quick Start**: Jump to [Complete Working Example](#-complete-working-example) to see a full demo with all authentication methods! - ---- - -## πŸ“š Table of Contents - -1. [Quick Start](#quick-start) -2. [HTTP Authentication](#http-authentication) -3. [OAuth2 & JWT](#oauth2--jwt) -4. [API Keys](#api-keys) -5. [Custom Middleware](#custom-middleware) -6. [Advanced Patterns](#advanced-patterns) -7. [Performance Notes](#performance-notes) - ---- - -## πŸš€ Quick Start - -### Basic HTTP Authentication - -```python -from turboapi import TurboAPI -from turboapi.security import HTTPBasic, HTTPBasicCredentials -import secrets - -app = TurboAPI() -security = HTTPBasic() - -def verify_credentials(credentials: HTTPBasicCredentials): - """Verify username and password""" - correct_username = secrets.compare_digest(credentials.username, "admin") - correct_password = secrets.compare_digest(credentials.password, "secret") - - if not (correct_username and correct_password): - raise HTTPException( - status_code=401, - detail="Incorrect username or password", - headers={"WWW-Authenticate": "Basic"}, - ) - return credentials.username - -@app.get("/protected") -def protected_route(credentials: HTTPBasicCredentials = Depends(security)): - username = verify_credentials(credentials) - return {"message": f"Hello {username}!", "authenticated": True} -``` - -**Test it:** -```bash -curl -u admin:secret http://localhost:8000/protected -``` - ---- - -## πŸ”’ HTTP Authentication - -### 1. HTTP Basic Authentication - -**Use case**: Simple username/password authentication - -```python -from turboapi import TurboAPI -from turboapi.security import HTTPBasic, HTTPBasicCredentials, Depends -from turboapi.exceptions import HTTPException -import secrets - -app = TurboAPI() -security = HTTPBasic() - -# In-memory user database (use real database in production!) -USERS_DB = { - "admin": { - "username": "admin", - "password": "secret123", # Hash this in production! - "role": "admin" - }, - "user": { - "username": "user", - "password": "pass456", - "role": "user" - } -} - -def authenticate_user(credentials: HTTPBasicCredentials): - """Authenticate and return user info""" - user = USERS_DB.get(credentials.username) - - if not user: - raise HTTPException( - status_code=401, - detail="Invalid credentials", - headers={"WWW-Authenticate": "Basic"}, - ) - - # Use secrets.compare_digest to prevent timing attacks - if not secrets.compare_digest(credentials.password, user["password"]): - raise HTTPException( - status_code=401, - detail="Invalid credentials", - headers={"WWW-Authenticate": "Basic"}, - ) - - return user - -@app.get("/admin") -def admin_only(credentials: HTTPBasicCredentials = Depends(security)): - user = authenticate_user(credentials) - - if user["role"] != "admin": - raise HTTPException(status_code=403, detail="Admin access required") - - return {"message": "Admin area", "user": user["username"]} - -@app.get("/user") -def user_area(credentials: HTTPBasicCredentials = Depends(security)): - user = authenticate_user(credentials) - return {"message": "User area", "user": user["username"], "role": user["role"]} -``` - -### 2. 
HTTP Bearer Authentication - -**Use case**: Token-based authentication (JWT, API tokens) - -```python -from turboapi import TurboAPI -from turboapi.security import HTTPBearer, HTTPAuthorizationCredentials, Depends -from turboapi.exceptions import HTTPException -import secrets - -app = TurboAPI() -security = HTTPBearer() - -# In-memory token store (use Redis/database in production!) -VALID_TOKENS = { - "secret-token-123": {"user_id": 1, "username": "alice", "role": "admin"}, - "secret-token-456": {"user_id": 2, "username": "bob", "role": "user"}, -} - -def verify_token(credentials: HTTPAuthorizationCredentials): - """Verify bearer token and return user info""" - token = credentials.credentials - - user = VALID_TOKENS.get(token) - if not user: - raise HTTPException( - status_code=401, - detail="Invalid or expired token", - headers={"WWW-Authenticate": "Bearer"}, - ) - - return user - -@app.get("/api/profile") -def get_profile(credentials: HTTPAuthorizationCredentials = Depends(security)): - user = verify_token(credentials) - return { - "user_id": user["user_id"], - "username": user["username"], - "role": user["role"] - } - -@app.post("/api/data") -def create_data( - name: str, - credentials: HTTPAuthorizationCredentials = Depends(security) -): - user = verify_token(credentials) - return { - "message": "Data created", - "name": name, - "created_by": user["username"] - } -``` - -**Test it:** -```bash -curl -H "Authorization: Bearer secret-token-123" http://localhost:8000/api/profile -``` - ---- - -## 🎫 OAuth2 & JWT - -### OAuth2 Password Flow - -```python -from turboapi import TurboAPI -from turboapi.security import OAuth2PasswordBearer, Depends -from turboapi.exceptions import HTTPException -import jwt -from datetime import datetime, timedelta - -app = TurboAPI() -oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token") - -# Configuration -SECRET_KEY = "your-secret-key-here" # Use environment variable in production! 
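-# For example, load the key from the environment instead of hard-coding it
-# (the variable name TURBOAPI_SECRET_KEY below is only illustrative):
-#   import os
-#   SECRET_KEY = os.environ.get("TURBOAPI_SECRET_KEY", "change-me")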
-ALGORITHM = "HS256" -ACCESS_TOKEN_EXPIRE_MINUTES = 30 - -# User database -USERS_DB = { - "alice": {"username": "alice", "password": "secret", "email": "alice@example.com"}, - "bob": {"username": "bob", "password": "pass123", "email": "bob@example.com"}, -} - -def create_access_token(data: dict): - """Create JWT access token""" - to_encode = data.copy() - expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) - to_encode.update({"exp": expire}) - - encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) - return encoded_jwt - -def verify_token(token: str): - """Verify JWT token and return user""" - try: - payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) - username: str = payload.get("sub") - - if username is None: - raise HTTPException(status_code=401, detail="Invalid token") - - user = USERS_DB.get(username) - if user is None: - raise HTTPException(status_code=401, detail="User not found") - - return user - except jwt.ExpiredSignatureError: - raise HTTPException(status_code=401, detail="Token expired") - except jwt.JWTError: - raise HTTPException(status_code=401, detail="Invalid token") - -@app.post("/token") -def login(username: str, password: str): - """Login endpoint - returns JWT token""" - user = USERS_DB.get(username) - - if not user or user["password"] != password: - raise HTTPException( - status_code=401, - detail="Incorrect username or password" - ) - - access_token = create_access_token(data={"sub": username}) - return { - "access_token": access_token, - "token_type": "bearer", - "expires_in": ACCESS_TOKEN_EXPIRE_MINUTES * 60 - } - -@app.get("/users/me") -def get_current_user(token: str = Depends(oauth2_scheme)): - """Get current user from token""" - user = verify_token(token) - return { - "username": user["username"], - "email": user["email"] - } - -@app.get("/users/me/items") -def read_user_items(token: str = Depends(oauth2_scheme)): - """Protected endpoint example""" - user = verify_token(token) - return { - "items": [ - {"id": 1, "name": "Item 1", "owner": user["username"]}, - {"id": 2, "name": "Item 2", "owner": user["username"]}, - ] - } -``` - -**Usage:** -```bash -# 1. Login to get token -curl -X POST http://localhost:8000/token \ - -d "username=alice&password=secret" - -# Response: {"access_token": "eyJ...", "token_type": "bearer"} - -# 2. Use token to access protected endpoints -curl -H "Authorization: Bearer eyJ..." 
http://localhost:8000/users/me -``` - ---- - -## πŸ”‘ API Keys - -### Header-based API Keys - -```python -from turboapi import TurboAPI -from turboapi.security import APIKeyHeader, Depends -from turboapi.exceptions import HTTPException - -app = TurboAPI() -api_key_header = APIKeyHeader(name="X-API-Key") - -# API key database -API_KEYS = { - "sk-prod-abc123": {"name": "Production Key", "rate_limit": 10000}, - "sk-dev-xyz789": {"name": "Development Key", "rate_limit": 1000}, -} - -def verify_api_key(api_key: str = Depends(api_key_header)): - """Verify API key""" - key_info = API_KEYS.get(api_key) - - if not key_info: - raise HTTPException( - status_code=403, - detail="Invalid API key" - ) - - return key_info - -@app.get("/api/data") -def get_data(key_info: dict = Depends(verify_api_key)): - return { - "data": "sensitive information", - "key_name": key_info["name"], - "rate_limit": key_info["rate_limit"] - } -``` - -**Test it:** -```bash -curl -H "X-API-Key: sk-prod-abc123" http://localhost:8000/api/data -``` - -### Query Parameter API Keys - -```python -from turboapi.security import APIKeyQuery - -api_key_query = APIKeyQuery(name="api_key") - -@app.get("/public/data") -def get_public_data(api_key: str = Depends(api_key_query)): - if api_key not in API_KEYS: - raise HTTPException(status_code=403, detail="Invalid API key") - - return {"data": "public information"} -``` - -**Test it:** -```bash -curl "http://localhost:8000/public/data?api_key=sk-prod-abc123" -``` - -### Cookie-based API Keys - -```python -from turboapi.security import APIKeyCookie - -api_key_cookie = APIKeyCookie(name="session_key") - -@app.get("/dashboard") -def dashboard(session_key: str = Depends(api_key_cookie)): - if session_key not in API_KEYS: - raise HTTPException(status_code=403, detail="Invalid session") - - return {"message": "Dashboard data"} -``` - ---- - -## πŸ› οΈ Custom Middleware - -### Rate Limiting Middleware - -```python -from turboapi import TurboAPI -from turboapi.middleware import BaseMiddleware -from collections import defaultdict -from datetime import datetime, timedelta - -app = TurboAPI() - -class RateLimitMiddleware(BaseMiddleware): - def __init__(self, requests_per_minute: int = 60): - self.requests_per_minute = requests_per_minute - self.requests = defaultdict(list) - - async def process_request(self, request): - """Check rate limit before processing request""" - client_ip = request.headers.get("X-Forwarded-For", "unknown") - now = datetime.now() - - # Clean old requests - self.requests[client_ip] = [ - req_time for req_time in self.requests[client_ip] - if now - req_time < timedelta(minutes=1) - ] - - # Check limit - if len(self.requests[client_ip]) >= self.requests_per_minute: - raise HTTPException( - status_code=429, - detail="Rate limit exceeded", - headers={"Retry-After": "60"} - ) - - # Add current request - self.requests[client_ip].append(now) - - return request - -# Add middleware -app.add_middleware(RateLimitMiddleware, requests_per_minute=100) -``` - -### Authentication Middleware - -```python -class AuthenticationMiddleware(BaseMiddleware): - def __init__(self, exclude_paths: list = None): - self.exclude_paths = exclude_paths or ["/login", "/health"] - - async def process_request(self, request): - """Verify authentication for all requests except excluded paths""" - path = request.url.path - - # Skip authentication for excluded paths - if path in self.exclude_paths: - return request - - # Check for authorization header - auth_header = request.headers.get("Authorization") - - if not 
auth_header or not auth_header.startswith("Bearer "): - raise HTTPException( - status_code=401, - detail="Missing or invalid authorization header", - headers={"WWW-Authenticate": "Bearer"} - ) - - token = auth_header.replace("Bearer ", "") - - # Verify token (implement your logic) - if not self.verify_token(token): - raise HTTPException(status_code=401, detail="Invalid token") - - return request - - def verify_token(self, token: str) -> bool: - """Implement token verification""" - return token in VALID_TOKENS - -# Add middleware -app.add_middleware( - AuthenticationMiddleware, - exclude_paths=["/login", "/health", "/docs"] -) -``` - ---- - -## 🎯 Advanced Patterns - -### Role-Based Access Control (RBAC) - -```python -from turboapi import TurboAPI -from turboapi.security import HTTPBearer, Depends -from turboapi.exceptions import HTTPException -from enum import Enum - -app = TurboAPI() -security = HTTPBearer() - -class Role(str, Enum): - ADMIN = "admin" - USER = "user" - GUEST = "guest" - -# User database with roles -USERS = { - "token-admin": {"username": "alice", "role": Role.ADMIN}, - "token-user": {"username": "bob", "role": Role.USER}, - "token-guest": {"username": "charlie", "role": Role.GUEST}, -} - -def get_current_user(credentials = Depends(security)): - """Get current user from token""" - token = credentials.credentials - user = USERS.get(token) - - if not user: - raise HTTPException(status_code=401, detail="Invalid token") - - return user - -def require_role(required_role: Role): - """Dependency to check user role""" - def role_checker(user: dict = Depends(get_current_user)): - if user["role"] != required_role: - raise HTTPException( - status_code=403, - detail=f"Requires {required_role} role" - ) - return user - return role_checker - -# Admin-only endpoint -@app.get("/admin/users") -def list_users(user: dict = Depends(require_role(Role.ADMIN))): - return {"users": list(USERS.values()), "requested_by": user["username"]} - -# User or Admin endpoint -@app.get("/data") -def get_data(user: dict = Depends(get_current_user)): - if user["role"] not in [Role.ADMIN, Role.USER]: - raise HTTPException(status_code=403, detail="Insufficient permissions") - - return {"data": "sensitive information", "user": user["username"]} - -# Public endpoint (no authentication) -@app.get("/public") -def public_data(): - return {"message": "Public data - no authentication required"} -``` - -### Multi-Factor Authentication (MFA) - -```python -import pyotp -from turboapi import TurboAPI -from turboapi.exceptions import HTTPException - -app = TurboAPI() - -# User database with MFA secrets -USERS_MFA = { - "alice": { - "password": "secret", - "mfa_secret": pyotp.random_base32(), - "mfa_enabled": True - } -} - -@app.post("/login/mfa") -def login_with_mfa(username: str, password: str, mfa_code: str): - """Login with username, password, and MFA code""" - user = USERS_MFA.get(username) - - if not user or user["password"] != password: - raise HTTPException(status_code=401, detail="Invalid credentials") - - if user["mfa_enabled"]: - totp = pyotp.TOTP(user["mfa_secret"]) - - if not totp.verify(mfa_code): - raise HTTPException(status_code=401, detail="Invalid MFA code") - - # Generate session token - token = create_access_token({"sub": username}) - return {"access_token": token, "token_type": "bearer"} - -@app.get("/mfa/setup") -def setup_mfa(username: str): - """Get MFA setup information""" - user = USERS_MFA.get(username) - - if not user: - raise HTTPException(status_code=404, detail="User not found") - - totp = 
pyotp.TOTP(user["mfa_secret"]) - - return { - "secret": user["mfa_secret"], - "qr_code_url": totp.provisioning_uri( - name=username, - issuer_name="TurboAPI" - ) - } -``` - -### Session Management - -```python -from turboapi import TurboAPI -from turboapi.responses import Response -import secrets -from datetime import datetime, timedelta - -app = TurboAPI() - -# Session store (use Redis in production!) -SESSIONS = {} - -def create_session(user_id: int): - """Create new session""" - session_id = secrets.token_urlsafe(32) - SESSIONS[session_id] = { - "user_id": user_id, - "created_at": datetime.now(), - "expires_at": datetime.now() + timedelta(hours=24) - } - return session_id - -def get_session(session_id: str): - """Get session if valid""" - session = SESSIONS.get(session_id) - - if not session: - return None - - if datetime.now() > session["expires_at"]: - del SESSIONS[session_id] - return None - - return session - -@app.post("/login/session") -def login_session(username: str, password: str, response: Response): - """Login and create session cookie""" - # Verify credentials (simplified) - if username != "alice" or password != "secret": - raise HTTPException(status_code=401, detail="Invalid credentials") - - # Create session - session_id = create_session(user_id=1) - - # Set cookie - response.set_cookie( - key="session_id", - value=session_id, - httponly=True, - secure=True, # HTTPS only - samesite="lax", - max_age=86400 # 24 hours - ) - - return {"message": "Logged in successfully"} - -@app.get("/profile") -def get_profile(session_id: str = Cookie(None)): - """Get user profile from session""" - if not session_id: - raise HTTPException(status_code=401, detail="Not authenticated") - - session = get_session(session_id) - - if not session: - raise HTTPException(status_code=401, detail="Invalid or expired session") - - return {"user_id": session["user_id"], "session_valid": True} - -@app.post("/logout") -def logout(session_id: str = Cookie(None), response: Response): - """Logout and clear session""" - if session_id and session_id in SESSIONS: - del SESSIONS[session_id] - - response.delete_cookie("session_id") - return {"message": "Logged out successfully"} -``` - ---- - -## ⚑ Performance Notes - -### TurboAPI Authentication Performance - -**All authentication middleware runs in Rust with zero Python overhead!** - -**Benchmark Results:** -- **Basic Auth**: 70K+ RPS (same as unprotected endpoints!) -- **Bearer Token**: 68K+ RPS (minimal overhead) -- **API Key**: 71K+ RPS (fastest - simple header check) -- **JWT Verification**: 50K+ RPS (Python JWT library overhead) - -**Performance Tips:** - -1. **Use API Keys for highest performance** - ```python - # Fastest - simple string comparison in Rust - api_key_header = APIKeyHeader(name="X-API-Key") - ``` - -2. **Cache JWT verification results** - ```python - from functools import lru_cache - - @lru_cache(maxsize=1000) - def verify_token_cached(token: str): - return jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) - ``` - -3. **Use Redis for session storage** - ```python - import redis - - redis_client = redis.Redis(host='localhost', port=6379, db=0) - - def get_session(session_id: str): - return redis_client.get(f"session:{session_id}") - ``` - -4. **Implement token refresh** - ```python - @app.post("/token/refresh") - def refresh_token(refresh_token: str): - # Verify refresh token - # Issue new access token - return {"access_token": new_token} - ``` - ---- - -## πŸ” Security Best Practices - -1. **Always use HTTPS in production** -2. 
-
----
-
-## πŸ“š Complete Working Example
-
-**[examples/authentication_demo.py](../examples/authentication_demo.py)** - Full-featured authentication demo
-
-**Features:**
-- βœ… Bearer Token Authentication
-- βœ… API Key Authentication (header-based)
-- βœ… Role-Based Access Control (admin/user/guest)
-- βœ… Login/Logout endpoints
-- βœ… Protected routes with different permission levels
-- βœ… User management (admin only)
-- βœ… Statistics endpoint
-- βœ… **70K+ RPS** with authentication enabled!
-
-**Test Users:**
-```
-alice   / secret123 (admin) - API Key: sk-alice-prod-abc123
-bob     / pass456   (user)  - API Key: sk-bob-dev-xyz789
-charlie / guest789  (guest) - API Key: sk-charlie-test-123
-```
-
-**Run it:**
-```bash
-python examples/authentication_demo.py
-# Server starts on http://localhost:8000
-```
-
-**Try it:**
-```bash
-# 1. Login to get token
-curl -X POST http://localhost:8000/login \
-  -H "Content-Type: application/json" \
-  -d '{"username": "alice", "password": "secret123"}'
-
-# Response: {"access_token": "token-admin-alice", ...}
-
-# 2. Use token to access protected endpoint
-curl http://localhost:8000/profile \
-  -H "Authorization: Bearer token-admin-alice"
-
-# 3. Use API key
-curl http://localhost:8000/api/data \
-  -H "X-API-Key: sk-alice-prod-abc123"
-
-# 4. Admin endpoint (requires admin role)
-curl http://localhost:8000/admin/users \
-  -H "Authorization: Bearer token-admin-alice"
-```
-
----
-
-## 🎯 Summary
-
-TurboAPI provides **FastAPI-compatible** authentication with **10x better performance**:
-
-- βœ… HTTP Basic/Bearer/Digest authentication
-- βœ… OAuth2 & JWT support
-- βœ… API Keys (header/query/cookie)
-- βœ… Custom middleware
-- βœ… RBAC & permissions
-- βœ… Session management
-- βœ… **70K+ RPS** with authentication enabled!
-
-**Performance**: All middleware runs in Rust - zero Python overhead! πŸš€
diff --git a/setup_python313t.sh b/setup_python313t.sh
deleted file mode 100755
index 40f0cf6..0000000
--- a/setup_python313t.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-# Setup Python 3.13 Free-Threading as Default
-# Run this script to switch your default Python to 3.13 free-threading
-
-set -e
-
-echo "🐍 Python 3.13 Free-Threading Setup"
-echo "===================================="
-echo ""
-
-# Check current Python
-echo "Current Python versions:"
-which -a python python3 python3.13 2>/dev/null || true
-echo ""
-python3 --version 2>/dev/null || echo "python3 not found"
-echo ""
-
-# Check if python3.13t exists
-if command -v python3.13t &> /dev/null; then
-    echo "βœ… python3.13t found at: $(which python3.13t)"
-    python3.13t --version
-else
-    echo "❌ python3.13t not found!"
-    echo ""
-    echo "πŸ“₯ You need to install Python 3.13 with free-threading support."
-    echo ""
-    echo "Option 1 - Download from python.org (EASIEST):"
-    echo "   Visit: https://www.python.org/downloads/"
-    echo "   Look for: 'Python 3.13.x with experimental free-threading support'"
-    echo "   Download and run the macOS installer"
-    echo ""
-    echo "Option 2 - Build from source (ADVANCED):"
-    echo "   cd /tmp"
-    echo "   git clone https://github.com/python/cpython.git"
-    echo "   cd cpython && git checkout v3.13.4"
-    echo "   ./configure --disable-gil --prefix=\$HOME/python313t"
-    echo "   make -j\$(sysctl -n hw.ncpu) && make install"
-    echo "   # Then add ~/python313t/bin to your PATH"
-    echo ""
-    echo "After installing, run this script again!"
-    exit 1
-fi
-
-echo ""
-echo "πŸ”§ Setting up shell aliases..."
-
-# Backup zshrc
-if [ -f ~/.zshrc ]; then
-    cp ~/.zshrc ~/.zshrc.backup.$(date +%Y%m%d_%H%M%S)
-    echo "βœ… Backed up ~/.zshrc"
-fi
-
-# Check if aliases already exist
-if grep -q "alias python=\"python3.13t\"" ~/.zshrc 2>/dev/null; then
-    echo "⚠️  Aliases already exist in ~/.zshrc"
-else
-    echo "" >> ~/.zshrc
-    echo "# Python 3.13 Free-Threading (added $(date))" >> ~/.zshrc
-    echo 'alias python="python3.13t"' >> ~/.zshrc
-    echo 'alias python3="python3.13t"' >> ~/.zshrc
-    echo 'alias pip="python3.13t -m pip"' >> ~/.zshrc
-    echo 'alias pip3="python3.13t -m pip"' >> ~/.zshrc
-    echo "βœ… Added aliases to ~/.zshrc"
-fi
-
-echo ""
-echo "🎯 Testing free-threading..."
-python3.13t -c "
-import sys
-gil_enabled = sys._is_gil_enabled() if hasattr(sys, '_is_gil_enabled') else True
-print(f'Python Version: {sys.version}')
-print(f'GIL enabled: {gil_enabled}')
-print(f'Free-threading: {not gil_enabled}')
-
-if not gil_enabled:
-    print('βœ… FREE-THREADING IS ENABLED!')
-else:
-    print('❌ WARNING: GIL is still enabled - this may not be a free-threading build')
-"
-
-echo ""
-echo "✨ Setup complete!"
-echo ""
-echo "⚠️  IMPORTANT: Run this command to activate the changes:"
-echo "   source ~/.zshrc"
-echo ""
-echo "Then test with:"
-echo "   python --version"
-echo "   python -c \"import sys; print('Free-threading:', not sys._is_gil_enabled())\""
-echo ""
-echo "πŸš€ To use TurboAPI with free-threading:"
-echo "   cd /Users/rachpradhan/rusty/turboAPI"
-echo "   python3.13t -m venv .venv-freethreading"
-echo "   source .venv-freethreading/bin/activate"
-echo "   pip install -e python/"
-echo "   maturin develop --manifest-path Cargo.toml"
-echo ""
-""" - -import sys -import subprocess -import tempfile -import shutil -from pathlib import Path - - -def print_status(message, status="info"): - """Print colored status messages""" - colors = { - "info": "\033[94m", # Blue - "success": "\033[92m", # Green - "warning": "\033[93m", # Yellow - "error": "\033[91m", # Red - "reset": "\033[0m" - } - - symbols = { - "info": "ℹ️", - "success": "βœ…", - "warning": "⚠️", - "error": "❌" - } - - color = colors.get(status, colors["info"]) - symbol = symbols.get(status, "") - reset = colors["reset"] - - print(f"{color}{symbol} {message}{reset}") - - -def run_command(cmd, cwd=None, capture_output=True): - """Run a shell command and return the result""" - try: - result = subprocess.run( - cmd, - shell=True, - cwd=cwd, - capture_output=capture_output, - text=True, - check=True - ) - return result.stdout if capture_output else None - except subprocess.CalledProcessError as e: - print_status(f"Command failed: {cmd}", "error") - if capture_output: - print(f"STDOUT: {e.stdout}") - print(f"STDERR: {e.stderr}") - return None - - -def test_local_development_install(): - """Test 1: Verify local development install works""" - print_status("Test 1: Testing local development install...", "info") - - # Build and install in development mode - result = run_command("cd python && maturin develop --release") - - if result is None: - print_status("Failed to build with maturin", "error") - return False - - print_status("Local development build successful", "success") - return True - - -def test_rust_module_import(): - """Test 2: Verify Rust module can be imported""" - print_status("Test 2: Testing Rust module import...", "info") - - test_code = """ -import sys -try: - from turboapi import turbonet - print("SUCCESS: turbonet imported") - print(f"Available: {hasattr(turbonet, 'TurboServer')}") - sys.exit(0) -except ImportError as e: - print(f"FAILED: {e}") - sys.exit(1) -""" - - result = subprocess.run( - [sys.executable, "-c", test_code], - capture_output=True, - text=True - ) - - if result.returncode != 0: - print_status("Failed to import turbonet module", "error") - print(result.stdout) - print(result.stderr) - return False - - if "SUCCESS" in result.stdout and "True" in result.stdout: - print_status("Rust module imported successfully", "success") - return True - else: - print_status("Rust module import incomplete", "error") - print(result.stdout) - return False - - -def test_turboapi_basic_functionality(): - """Test 3: Verify basic TurboAPI functionality""" - print_status("Test 3: Testing basic TurboAPI functionality...", "info") - - test_code = """ -import sys -try: - from turboapi import TurboAPI - - # Create app - app = TurboAPI(title="Test App") - - # Add a simple route - @app.get("/test") - def test_route(): - return {"status": "ok"} - - # Check that route was registered - if hasattr(app, 'routes') and len(app.routes) > 0: - print("SUCCESS: Route registered") - sys.exit(0) - else: - print("FAILED: Route not registered") - sys.exit(1) - -except Exception as e: - print(f"FAILED: {e}") - import traceback - traceback.print_exc() - sys.exit(1) -""" - - result = subprocess.run( - [sys.executable, "-c", test_code], - capture_output=True, - text=True - ) - - if result.returncode != 0: - print_status("Basic functionality test failed", "error") - print(result.stdout) - print(result.stderr) - return False - - if "SUCCESS" in result.stdout: - print_status("Basic functionality works", "success") - return True - else: - print_status("Basic functionality incomplete", "error") - 
-
-
-def test_wheel_build():
-    """Test 4: Verify wheel can be built"""
-    print_status("Test 4: Testing wheel build...", "info")
-
-    # Create temporary directory for wheel
-    with tempfile.TemporaryDirectory() as tmpdir:
-        print_status(f"Building wheel in {tmpdir}...", "info")
-
-        result = run_command(
-            f"cd python && maturin build --release --out {tmpdir}",
-            capture_output=True
-        )
-
-        if result is None:
-            print_status("Failed to build wheel", "error")
-            return False
-
-        # Check if wheel was created
-        wheels = list(Path(tmpdir).glob("*.whl"))
-
-        if not wheels:
-            print_status("No wheel file found", "error")
-            return False
-
-        wheel_path = wheels[0]
-        print_status(f"Wheel built successfully: {wheel_path.name}", "success")
-
-        # Inspect wheel contents
-        print_status("Inspecting wheel contents...", "info")
-        result = run_command(f"unzip -l {wheel_path}", capture_output=True)
-
-        if result and "turbonet" in result:
-            print_status("Rust module found in wheel βœ“", "success")
-
-            # Show relevant files
-            lines = [line for line in result.split('\n') if 'turbonet' in line or 'turboapi' in line]
-            for line in lines[:10]:  # Show first 10 relevant files
-                print(f"   {line.strip()}")
-
-            return True
-        else:
-            print_status("Rust module NOT found in wheel βœ—", "error")
-            print("Wheel contents:")
-            print(result)
-            return False
-
-
-def test_wheel_install_in_venv():
-    """Test 5: Verify wheel installs correctly in fresh venv"""
-    print_status("Test 5: Testing wheel install in fresh venv...", "info")
-
-    with tempfile.TemporaryDirectory() as tmpdir:
-        venv_dir = Path(tmpdir) / "test_venv"
-        wheel_dir = Path(tmpdir) / "wheels"
-        wheel_dir.mkdir()
-
-        # Build wheel
-        print_status("Building wheel...", "info")
-        result = run_command(
-            f"cd python && maturin build --release --out {wheel_dir}",
-            capture_output=True
-        )
-
-        if result is None:
-            print_status("Failed to build wheel", "error")
-            return False
-
-        wheels = list(wheel_dir.glob("*.whl"))
-        if not wheels:
-            print_status("No wheel found", "error")
-            return False
-
-        wheel_path = wheels[0]
-
-        # Create venv
-        print_status("Creating test virtual environment...", "info")
-        result = run_command(f"{sys.executable} -m venv {venv_dir}")
-
-        if result is None:
-            print_status("Failed to create venv", "error")
-            return False
-
-        # Install wheel in venv
-        pip_path = venv_dir / "bin" / "pip"
-        python_path = venv_dir / "bin" / "python"
-
-        print_status(f"Installing wheel: {wheel_path.name}", "info")
-        result = run_command(f"{pip_path} install {wheel_path}")
-
-        if result is None:
-            print_status("Failed to install wheel", "error")
-            return False
-
-        # Test import in venv
-        print_status("Testing import in venv...", "info")
-        test_code = """
-try:
-    from turboapi import turbonet
-    from turboapi import TurboAPI
-    app = TurboAPI()
-    print("SUCCESS")
-except Exception as e:
-    print(f"FAILED: {e}")
-    import traceback
-    traceback.print_exc()
-"""
-
-        result = run_command(
-            f"{python_path} -c '{test_code}'",
-            capture_output=True
-        )
-
-        if result and "SUCCESS" in result:
-            print_status("Wheel installs and imports correctly βœ“", "success")
-            return True
-        else:
-            print_status("Wheel install/import failed βœ—", "error")
-            print(result)
-            return False
-
-
-def main():
-    """Run all tests"""
-    print("\n" + "=" * 70)
-    print("πŸ§ͺ TurboAPI Package Integrity Test Suite")
-    print("=" * 70 + "\n")
-
-    tests = [
-        ("Local Development Install", test_local_development_install),
-        ("Rust Module Import", test_rust_module_import),
-        ("Basic Functionality", test_turboapi_basic_functionality),
-        ("Wheel Build", test_wheel_build),
-        ("Wheel Install in Venv", test_wheel_install_in_venv),
-    ]
-
-    results = {}
-
-    for test_name, test_func in tests:
-        print(f"\n{'─' * 70}")
-        try:
-            results[test_name] = test_func()
-        except Exception as e:
-            print_status(f"Test crashed: {e}", "error")
-            import traceback
-            traceback.print_exc()
-            results[test_name] = False
-
-    # Summary
-    print(f"\n{'=' * 70}")
-    print("πŸ“Š Test Summary")
-    print("=" * 70 + "\n")
-
-    for test_name, passed in results.items():
-        status = "success" if passed else "error"
-        print_status(f"{test_name}: {'PASSED' if passed else 'FAILED'}", status)
-
-    total = len(results)
-    passed = sum(results.values())
-
-    print(f"\n{'=' * 70}")
-    if passed == total:
-        print_status(f"All {total} tests passed! ✨", "success")
-        print_status("Package is ready for release! πŸš€", "success")
-        return 0
-    else:
-        print_status(f"{passed}/{total} tests passed", "warning")
-        print_status("Fix issues before releasing!", "error")
-        return 1
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/test_simple_post.py b/test_simple_post.py
deleted file mode 100644
index 97eca9a..0000000
--- a/test_simple_post.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python3
-"""Simple test to debug POST body parsing"""
-
-from turboapi import TurboAPI
-import time
-import threading
-import requests
-
-app = TurboAPI(title="Simple Test")
-
-@app.post("/test")
-def handler(request_data: dict):
-    print(f"Handler called with: {request_data}")
-    return {"received": request_data}
-
-# Start server
-def start_server():
-    app.run(host="127.0.0.1", port=9000)
-
-server_thread = threading.Thread(target=start_server, daemon=True)
-server_thread.start()
-time.sleep(3)
-
-# Test
-print("Sending request...")
-response = requests.post("http://127.0.0.1:9000/test", json={"key": "value"})
-print(f"Status: {response.status_code}")
-print(f"Response: {response.text}")
diff --git a/turbo_vs_fastapi_benchmark_20250929_025526.json b/turbo_vs_fastapi_benchmark_20250929_025526.json
deleted file mode 100644
index f9b1327..0000000
--- a/turbo_vs_fastapi_benchmark_20250929_025526.json
+++ /dev/null
@@ -1,60 +0,0 @@
-{
-  "timestamp": "2025-09-29T02:55:26.118359",
-  "benchmark_type": "TurboAPI vs FastAPI Performance Comparison",
-  "python_version": "3.13.1 experimental free-threading build (main, Dec  6 2024, 20:34:21) [Clang 18.1.8 ]",
-  "platform": {
-    "sysname": "Darwin",
-    "nodename": "Mac",
-    "release": "25.0.0",
-    "version": "Darwin Kernel Version 25.0.0: Mon Aug 25 21:17:54 PDT 2025; root:xnu-12377.1.9~3/RELEASE_ARM64_T6041",
-    "machine": "arm64"
-  },
-  "results": {
-    "TurboAPI": {
-      "Light Load": {
-        "latency_p50": "227.00us",
-        "latency_p90": "312.00us",
-        "latency_p99": "0.97ms",
-        "rps": 179227.26,
-        "transfer_rate": "38.16MB"
-      },
-      "Medium Load": {
-        "latency_p50": "429.00us",
-        "latency_p90": "0.92ms",
-        "latency_p99": "15.56ms",
-        "rps": 166907.24,
-        "transfer_rate": "35.56MB"
-      },
-      "Heavy Load": {
-        "latency_p50": "825.00us",
-        "latency_p90": "3.49ms",
-        "latency_p99": "19.80ms",
-        "rps": 167104.29,
-        "transfer_rate": "35.59MB"
-      }
-    },
-    "FastAPI": {
-      "Light Load": {
-        "latency_p50": "7.01ms",
-        "latency_p90": "7.39ms",
-        "latency_p99": "7.99ms",
-        "rps": 6783.07,
-        "transfer_rate": "1.68MB"
-      },
-      "Medium Load": {
-        "latency_p50": "13.78ms",
-        "latency_p90": "17.00ms",
-        "latency_p99": "20.21ms",
-        "rps": 6684.65,
-        "transfer_rate": "1.66MB"
-      },
-      "Heavy Load": {
-        "latency_p50": "28.89ms",
-        "latency_p90": "31.37ms",
"latency_p99": "37.57ms", - "rps": 6544.67, - "transfer_rate": "1.62MB" - } - } - } -} \ No newline at end of file