diff --git a/examples/error_handling.py b/examples/error_handling.py
new file mode 100644
index 00000000..ffb5fd23
--- /dev/null
+++ b/examples/error_handling.py
@@ -0,0 +1,309 @@
+"""
+Error Handling and Resilience Patterns in Agentics
+
+This example demonstrates robust transduction patterns including:
+- Retry logic with exponential backoff
+- Graceful error handling and fallback strategies
+- Partial result processing
+- Input validation and sanitization
+- Error categorization and reporting
+"""
+
+import asyncio
+import logging
+from typing import Optional, List, Union
+from enum import Enum
+
+from pydantic import BaseModel, Field, ValidationError
+from agentics import AG
+
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class ErrorType(str, Enum):
+    VALIDATION_ERROR = "validation_error"
+    LLM_ERROR = "llm_error"
+    TIMEOUT_ERROR = "timeout_error"
+    PARSE_ERROR = "parse_error"
+    UNKNOWN_ERROR = "unknown_error"
+
+
+class RobustAnswer(BaseModel):
+    """A resilient answer type that captures both success and failure states."""
+
+    answer: Optional[str] = Field(None, description="The extracted answer")
+    confidence: Optional[float] = Field(None, ge=0.0, le=1.0, description="Confidence score 0-1")
+    source_question: Optional[str] = Field(None, description="Original input question")
+    error_type: Optional[ErrorType] = Field(None, description="Type of error if processing failed")
+    error_message: Optional[str] = Field(None, description="Detailed error message")
+    retry_count: int = Field(0, description="Number of retry attempts made")
+    processing_time_ms: Optional[float] = Field(None, description="Processing time in milliseconds")
+    is_success: bool = Field(True, description="Whether processing succeeded")
+
+
+class ValidationResult(BaseModel):
+    """Result of input validation with sanitized input."""
+
+    is_valid: bool
+    sanitized_input: Optional[str] = None
+    validation_errors: List[str] = []
+
+
+def validate_and_sanitize_input(input_text: str) -> ValidationResult:
+    """Validate and sanitize input before processing."""
+
+    errors = []
+    sanitized = input_text.strip()
+
+    # Basic validation rules
+    if not sanitized:
+        errors.append("Empty input")
+        return ValidationResult(is_valid=False, validation_errors=errors)
+
+    if len(sanitized) > 1000:
+        errors.append("Input too long, truncating")
+        sanitized = sanitized[:1000] + "..."
+
+    # Remove potential harmful characters or patterns
+    forbidden_patterns = [
+        "What is AI?",  # Malicious input
+        "What happens when the LLM fails to respond properly and we need fallback handling?",  # Complex case
+        "Simple question",  # Normal case
+    ]
+
+    # Create agent
+    if not AG.get_llm_provider():
+        print("āš ļø No LLM provider configured. Set up your API key in .env file.")
+        return
+
+    agent = AG(atype=RobustAnswer, llm=AG.get_llm_provider())
+
+    print(f"šŸ“ Processing {len(test_inputs)} test inputs with error handling...")
+    print()
+
+    # Process with error handling
+    results = await safe_transduction_with_retry(
+        agent=agent,
+        input_data=test_inputs,
+        max_retries=2,
+        timeout_seconds=15.0
+    )
+
+    # Display results
+    print("šŸ“Š RESULTS:")
+    print("-" * 50)
+
+    for i, result in enumerate(results.states):
+        status = "āœ…" if result.is_success else "āŒ"
+        print(f"{status} Item {i+1}:")
+        print(f" Question: {result.source_question[:50]}...")
+        if result.is_success:
+            print(f" Answer: {result.answer}")
+            print(f" Confidence: {result.confidence}")
+        else:
+            print(f" Error: {result.error_type} - {result.error_message}")
+        print(f" Retries: {result.retry_count}, Time: {result.processing_time_ms:.1f}ms")
+        print()
+
+    # Summary analysis
+    analysis = analyze_results(results)
+    print("šŸ“ˆ SUMMARY ANALYSIS:")
+    print("-" * 50)
+    print(f"Success Rate: {analysis['success_rate']:.1%} ({analysis['successful']}/{analysis['total_items']})")
+    print(f"Total Retries: {analysis['total_retries']}")
+    print(f"Avg Processing Time: {analysis['avg_processing_time_ms']:.1f}ms")
+
+    if analysis['error_breakdown']:
+        print("\nError Breakdown:")
+        for error_type, count in analysis['error_breakdown'].items():
+            print(f" {error_type}: {count}")
+
+    print("\nšŸŽÆ Key Takeaways:")
+    print("- Input validation prevents processing of malformed data")
+    print("- Retry logic with exponential backoff handles transient failures")
+    print("- Structured error reporting aids debugging and monitoring")
+    print("- Partial success handling allows processing to continue despite failures")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
\ No newline at end of file
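Note that the hunk is truncated: it breaks off inside `validate_and_sanitize_input` just after `forbidden_patterns` is opened, and the three quoted strings shown there (with their `# Malicious input` / `# Complex case` / `# Normal case` comments) read more like the `test_inputs` list that the elided `async def main()` defines before `# Create agent`. As a rough idea of how the validation step could conclude, here is a hedged sketch; the pattern strings and the helper name `check_forbidden_patterns` are hypothetical, not taken from the PR.

```python
from typing import List, Optional

# Hypothetical patterns; the PR's actual forbidden_patterns list is not visible in the hunk.
SUSPECT_PATTERNS: List[str] = ["ignore previous instructions", "<script", "system prompt"]


def check_forbidden_patterns(sanitized: str, errors: List[str]) -> Optional[ValidationResult]:
    """Return a failed ValidationResult if a suspect pattern is present, else None."""
    lowered = sanitized.lower()
    for pattern in SUSPECT_PATTERNS:
        if pattern in lowered:
            errors.append(f"Forbidden pattern detected: {pattern!r}")
            return ValidationResult(is_valid=False, validation_errors=errors)
    return None
```

`validate_and_sanitize_input` could then finish by returning the failed result when `check_forbidden_patterns` reports one, and `ValidationResult(is_valid=True, sanitized_input=sanitized, validation_errors=errors)` otherwise.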
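`main()` also awaits `safe_transduction_with_retry`, whose definition falls in the elided portion of the file. Inferring its signature from the call site and its output from how `results.states` is consumed, one possible shape is sketched below. It assumes an awaitable `agent << [...]` transduction call that yields an AG of `RobustAnswer` states and an `AG(atype=..., states=...)` constructor; both are assumptions about the Agentics API, so substitute whatever call your version exposes. It reuses `RobustAnswer`, `ErrorType`, and `validate_and_sanitize_input` from the file above.

```python
import asyncio
import time
from typing import List, Optional

from pydantic import ValidationError

from agentics import AG


async def safe_transduction_with_retry(
    agent: AG,
    input_data: List[str],
    max_retries: int = 2,
    timeout_seconds: float = 15.0,
) -> AG:
    """Transduce each input with validation, a timeout, and exponential-backoff retries."""
    states: List[RobustAnswer] = []

    for item in input_data:
        start = time.perf_counter()

        # Reject malformed input before spending an LLM call on it.
        validation = validate_and_sanitize_input(item)
        if not validation.is_valid:
            states.append(RobustAnswer(
                source_question=item,
                is_success=False,
                error_type=ErrorType.VALIDATION_ERROR,
                error_message="; ".join(validation.validation_errors),
                processing_time_ms=(time.perf_counter() - start) * 1000,
            ))
            continue

        last_error: Optional[Exception] = None
        error_type = ErrorType.UNKNOWN_ERROR
        for attempt in range(max_retries + 1):
            try:
                # Assumed API: `agent << [text]` is awaitable and returns an AG
                # whose .states contain RobustAnswer instances.
                result = await asyncio.wait_for(
                    agent << [validation.sanitized_input], timeout=timeout_seconds
                )
                answer = result.states[0]
                answer.source_question = item
                answer.retry_count = attempt
                answer.processing_time_ms = (time.perf_counter() - start) * 1000
                answer.is_success = True
                states.append(answer)
                break
            except asyncio.TimeoutError as exc:
                last_error, error_type = exc, ErrorType.TIMEOUT_ERROR
            except ValidationError as exc:
                last_error, error_type = exc, ErrorType.PARSE_ERROR
            except Exception as exc:  # noqa: BLE001 - categorize, then keep going
                last_error, error_type = exc, ErrorType.LLM_ERROR
            if attempt < max_retries:
                # Exponential backoff: 1s, 2s, 4s, ...
                await asyncio.sleep(2 ** attempt)
        else:
            # All attempts failed; record a structured failure instead of raising.
            states.append(RobustAnswer(
                source_question=item,
                is_success=False,
                error_type=error_type,
                error_message=str(last_error),
                retry_count=max_retries,
                processing_time_ms=(time.perf_counter() - start) * 1000,
            ))

    # Assumed constructor: AG built directly from a list of typed states.
    return AG(atype=RobustAnswer, states=states)
```

Recording failures as `RobustAnswer` states rather than raising is what keeps partial results usable, which is the "partial success handling" takeaway printed at the end of `main()`.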
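`analyze_results` is elided as well. Judging only by the dictionary keys `main()` reads (`success_rate`, `successful`, `total_items`, `total_retries`, `avg_processing_time_ms`, `error_breakdown`), a minimal sketch could look like this:

```python
from collections import Counter
from typing import Any, Dict

from agentics import AG


def analyze_results(results: AG) -> Dict[str, Any]:
    """Summarize a batch of RobustAnswer states for the report printed by main()."""
    states = list(results.states)
    successes = [s for s in states if s.is_success]
    failures = [s for s in states if not s.is_success]
    timings = [s.processing_time_ms for s in states if s.processing_time_ms is not None]

    return {
        "total_items": len(states),
        "successful": len(successes),
        "success_rate": len(successes) / len(states) if states else 0.0,
        "total_retries": sum(s.retry_count for s in states),
        "avg_processing_time_ms": sum(timings) / len(timings) if timings else 0.0,
        # Maps each ErrorType value to how many failures it produced.
        "error_breakdown": dict(Counter(f.error_type.value for f in failures if f.error_type)),
    }
```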